/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 *
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/set_memory.h>

#include <linux/uaccess.h>

#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
static bool hibernate_restore_protection;
static bool hibernate_restore_protection_active;

void enable_restore_image_protection(void)
{
	hibernate_restore_protection = true;
}

static inline void hibernate_restore_protection_begin(void)
{
	hibernate_restore_protection_active = hibernate_restore_protection;
}

static inline void hibernate_restore_protection_end(void)
{
	hibernate_restore_protection_active = false;
}

static inline void hibernate_restore_protect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_ro((unsigned long)page_address, 1);
}

static inline void hibernate_restore_unprotect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_rw((unsigned long)page_address, 1);
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
#endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}

/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;

/*
 * List of "safe" pages (i.e. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only)
 *
 * During image restoration, for storing the PBE list and the image data, we can
 * only use memory pages that do not conflict with the pages used before
 * hibernation. The "unsafe" pages have PageNosaveFree set and we count them
 * using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 * swsusp_free() can release it.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

static void *__get_safe_page(gfp_t gfp_mask)
{
	if (safe_pages_list) {
		void *ret = safe_pages_list;

		safe_pages_list = safe_pages_list->next;
		memset(ret, 0, PAGE_SIZE);
		return ret;
	}
	return get_image_page(gfp_mask, PG_SAFE);
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)__get_safe_page(gfp_mask);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

static void recycle_safe_page(void *page_address)
{
	struct linked_page *lp = page_address;

	lp->next = safe_pages_list;
	safe_pages_list = lp;
}

/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page flags
 * set by it are affected).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

static inline void free_list_of_pages(struct linked_page *list,
				      int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}

/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time there is no room for a new object in
 * the current page. The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					   of the current page */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
		       int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
					get_image_page(ca->gfp_mask, PG_ANY);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}
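
/*
 * Illustrative usage sketch (not part of the original file): objects are
 * carved out of the current backing page until it fills up, and can only
 * be released all at once, here via free_list_of_pages().
 */
#if 0
static void chain_alloc_example(void)
{
	struct chain_allocator ca;
	void *a, *b;

	chain_init(&ca, GFP_KERNEL, PG_ANY);
	a = chain_alloc(&ca, 64);	/* allocates the first backing page */
	b = chain_alloc(&ca, 64);	/* fits in the same page, no new page */
	/* ... use a and b ... */
	free_list_of_pages(ca.chain, PG_UNSAFE_KEEP);	/* frees both at once */
}
#endif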

/**
 * Data types related to memory bitmaps.
 *
 * A memory bitmap is a structure consisting of many linked lists of
 * objects. The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone. For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent each block of the bitmap in which information is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0. Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * struct zone_bitmap contains a pointer to a list of bitmap block
 * objects and a pointer to the bitmap block object that has been
 * most recently used for setting bits. Additionally, it contains the
 * PFNs that correspond to the start and end of the represented zone.
 *
 * struct bm_block contains a pointer to the memory page in which
 * information is stored (in the form of a block of bitmap).
 * It also contains the pfns that correspond to the start and end of
 * the represented memory area.
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits. There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes. The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)

/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link Zones together */
	struct list_head nodes;		/* Radix Tree inner nodes */
	struct list_head leaves;	/* Radix Tree leaves */
	unsigned long start_pfn;	/* Zone start page frame */
	unsigned long end_pfn;		/* Zone end page frame + 1 */
	struct rtree_node *rtree;	/* Radix Tree Root */
	int levels;			/* Number of Radix Tree Levels */
	unsigned int blocks;		/* Number of Bitmap Blocks */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	int node_bit;
};

struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					   bitmap objects and bitmap block
					   objects */
	struct bm_position cur;	/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
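
/*
 * Worked example (illustrative): with 4 KiB pages on a 64-bit system,
 * BM_BITS_PER_BLOCK = 4096 * 8 = 32768, so one leaf page of bitmap covers
 * 32768 page frames (128 MiB of memory), and BM_RTREE_LEVEL_SHIFT = 9, so
 * each inner node holds 512 child pointers; a single inner level is then
 * enough for a zone of up to 512 * 128 MiB = 64 GiB.
 */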

/**
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree. It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}

/**
 * add_rtree_block - Add a new leaf node to the radix tree.
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order. This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}
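
/*
 * Illustrative walk (not in the original file): inserting block_nr = 5 into
 * a tree with zone->levels = 2 and BM_RTREE_LEVEL_SHIFT = 9. At i = 2 the
 * index is (5 >> 9) & 511 = 0, so the walk descends through slot 0 of the
 * root node; at i = 1 the index is 5 & 511 = 5, so the new leaf lands in
 * slot 5 of that inner node.
 */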

static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free);

/**
 * create_zone_bm_rtree - Create a radix tree for one zone.
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
						      int safe_needed,
						      struct chain_allocator *ca,
						      unsigned long start,
						      unsigned long end)
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	pages = end - start;
	zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}

/**
 * free_zone_bm_rtree - Free the memory of the radix tree.
 *
 * Free all node pages of the radix tree. The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		free_image_page(node->data, clear_nosave_free);

	list_for_each_entry(node, &zone->leaves, list)
		free_image_page(node->data, clear_nosave_free);
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}

/**
 * create_mem_extents - Create a list of memory extents.
 * @list: List to put the extents into.
 * @gfp_mask: Mask to use for memory allocations.
 *
 * The extents represent contiguous ranges of PFNs.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}
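
/*
 * Illustrative example (not in the original file): populated zones spanning
 * PFNs [0, 100), [50, 150) and [300, 400) yield two extents, [0, 150) and
 * [300, 400) -- the first two ranges overlap and are merged, the third is
 * disjoint and gets an extent of its own.
 */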

/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
			    int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}

/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}

/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents @pfn
 * and return the position of the bit in @addr and @bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have found the zone. Now walk the radix tree to find the leaf node
	 * for our PFN.
	 */
	node = bm->cur.node;
	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node = zone->rtree;
	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}
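
/*
 * Worked example (illustrative): with 4 KiB pages, BM_BLOCK_SHIFT = 15 and
 * BM_BLOCK_MASK = 32767. For a zone starting at PFN 0 and pfn = 40000, the
 * walk descends to the leaf with block_nr = 40000 >> 15 = 1 and reports
 * bit_nr = 40000 & 32767 = 7232 within that leaf's bitmap page.
 */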

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);

	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
	int bit;

	bit = max(bm->cur.node_bit - 1, 0);
	clear_bit(bit, bm->cur.node->data);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap. This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Return true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
		bm->cur.node = list_entry(bm->cur.node->list.next,
					  struct rtree_node, list);
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit = 0;
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, goto next zone */
	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
		bm->cur.zone = list_entry(bm->cur.zone->list.next,
					  struct mem_zone_bm_rtree, list);
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}

/**
 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the next
 * set bit in @bm and returns the PFN represented by it. If no more bits are
 * set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function for the given memory bitmap.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit = find_next_bit(bm->cur.node->data, bits,
				    bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			bm->cur.node_bit = bit + 1;
			return pfn;
		}
	} while (rtree_next_node(bm));

	return BM_END_OF_MAP;
}
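
/*
 * Illustrative iteration sketch (not in the original file): the canonical
 * way to visit every set bit in a bitmap, as done by swsusp_free() and
 * clear_free_pages() below.
 */
#if 0
static void walk_bitmap_example(struct memory_bitmap *bm)
{
	unsigned long pfn;

	memory_bm_position_reset(bm);	/* mandatory before the first call */
	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
	     pfn = memory_bm_next_pfn(bm))
		pr_debug("PFN %lu is set\n", pfn);
}
#endif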

/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		recycle_safe_page(node->data);

	list_for_each_entry(node, &zone->leaves, list)
		recycle_safe_page(node->data);
}

static void memory_bm_recycle(struct memory_bitmap *bm)
{
	struct mem_zone_bm_rtree *zone;
	struct linked_page *p_list;

	list_for_each_entry(zone, &bm->zones, list)
		recycle_zone_bm_rtree(zone);

	p_list = bm->p_list;
	while (p_list) {
		struct linked_page *lp = p_list;

		p_list = lp->next;
		recycle_safe_page(lp);
	}
}

/**
 * register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be saved
 * during hibernation (to be used in the early initialization code).
 */
void __init __register_nosave_region(unsigned long start_pfn,
				     unsigned long end_pfn, int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
				    struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* During init, this shouldn't fail */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else {
		/* This allocation cannot fail */
		region = memblock_alloc(sizeof(struct nosave_region),
					SMP_CACHE_BYTES);
	}
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of which
 * should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}

/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
 *
 * Create bitmaps needed for marking page frames that should not be saved and
 * free page frames. The forbidden_pages_map and free_pages_map pointers are
 * only modified if everything goes well, because we don't want the bits to be
 * touched before both bitmaps are set up.
 */
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps(). The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("Basic memory bitmaps freed\n");
}

void clear_free_pages(void)
{
#ifdef CONFIG_PAGE_POISONING_ZERO
	struct memory_bitmap *bm = free_pages_map;
	unsigned long pfn;

	if (WARN_ON(!(free_pages_map)))
		return;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (pfn_valid(pfn))
			clear_highpage(pfn_to_page(pfn));

		pfn = memory_bm_next_pfn(bm);
	}
	memory_bm_position_reset(bm);
	pr_info("free pages cleared after restore\n");
#endif /* PAGE_POISONING_ZERO */
}

/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone to carry out the computation for.
 *
 * Estimate the number of additional pages needed for setting up hibernation
 * image data structures for @zone (usually, the returned value is greater than
 * the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}
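
/*
 * Worked example (illustrative, 64-bit, 4 KiB pages): a zone spanning
 * 1048576 page frames (4 GiB) needs 32 leaf pages (1048576 / 32768), one
 * linked page for the 32 rtree_node structs and one inner node, giving
 * rtree = 34; the result is doubled to 68 because two bitmaps of this
 * kind are created (see orig_bm and copy_bm below).
 */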

#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */

/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}

/*
 * This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the kernel
 * page tables (this always is the case if CONFIG_DEBUG_PAGEALLOC is not set
 * and in that case kernel_page_present() always returns 'true').
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}

#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * The page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
		       pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void copy_data_pages(struct memory_bitmap *copy_bm,
			    struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for (;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
static unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages. During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released. On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	if (!forbidden_pages_map || !free_pages_map)
		goto out;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next bit set in both bitmaps. This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		hibernate_restore_unprotect_page(page_address(page));
		__free_page(page);
		goto loop;
	}

out:
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
	hibernate_restore_protection_end();
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 * __fraction - Compute (an approximation of) x * (multiplier / base).
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	x *= multiplier;
	do_div(x, base);
	return (unsigned long)x;
}

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						  unsigned long highmem,
						  unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
							 unsigned long highmem,
							 unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */
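
/*
 * Worked example (illustrative): preallocate_highmem_fraction() splits a
 * request proportionally to the amount of highmem. With nr_pages = 1000,
 * highmem = 256 and total = 1024, __fraction(1000, 256, 1024) = 250, so
 * roughly a quarter of the requested frames come from highmem.
 */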

/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image.
 */
static unsigned long free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem, free;

	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}
	free = to_free_normal + to_free_highmem;

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}

	return free;
}

/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate the
 * minimum acceptable size of a hibernation image to use as the lower limit for
 * preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_node_page_state(NR_SLAB_RECLAIMABLE)
		+ global_node_page_state(NR_ACTIVE_ANON)
		+ global_node_page_state(NR_INACTIVE_ANON)
		+ global_node_page_state(NR_ACTIVE_FILE)
		+ global_node_page_state(NR_INACTIVE_FILE);

	return saveable <= size ? 0 : saveable - size;
}
64a473cb 1664/**
ef96f639 1665 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
4bb33435
RW
1666 *
1667 * To create a hibernation image it is necessary to make a copy of every page
1668 * frame in use. We also need a number of page frames to be free during
1669 * hibernation for allocations made while saving the image and for device
1670 * drivers, in case they need to allocate memory from their hibernation
ddeb6487
RW
1671 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
1672 * estimate) and reserverd_size divided by PAGE_SIZE (which is tunable through
1673 * /sys/power/reserved_size, respectively). To make this happen, we compute the
1674 * total number of available page frames and allocate at least
4bb33435 1675 *
ddeb6487
RW
1676 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
1677 * + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
4bb33435
RW
1678 *
1679 * of them, which corresponds to the maximum size of a hibernation image.
1680 *
1681 * If image_size is set below the number following from the above formula,
1682 * the preallocation of memory is continued until the total number of saveable
ef4aede3
RW
1683 * pages in the system is below the requested image size or the minimum
1684 * acceptable image size returned by minimum_image_size(), whichever is greater.
4bb33435 1685 */
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	ktime_t start, stop;
	int error;

	pr_info("Preallocating image memory... ");
	start = ktime_get();

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	alloc_normal = 0;
	alloc_highmem = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Add number of pages required for page keys (s390 only). */
	size += page_key_additional_pages(saveable);

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
	/* Compute the desired number of image pages specified by image_size. */
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the desired number of image pages is at least as large as the
	 * current number of saveable pages in memory, allocate page frames for
	 * the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages, avail_normal);
		goto out;
	}

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	/*
	 * To avoid excessive pressure on the normal zone, leave room in it to
	 * accommodate an image of the minimum size (unless it's already too
	 * small, in which case don't preallocate pages from it at all).
	 */
	if (avail_normal > pages)
		avail_normal -= pages;
	else
		avail_normal = 0;
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to need a
	 * large number of page frames to allocate and make it free some memory.
	 * NOTE: If this is not done, performance will be hurt badly in some
	 * test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply some
	 * pressure to decrease it. First, make room for the largest possible
	 * image and fail if that doesn't work. Next, try to decrease the size
	 * of the image as much as indicated by 'size' using allocations from
	 * highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = count - max_size;
	if (alloc > pages_highmem)
		alloc -= pages_highmem;
	else
		alloc = 0;
	pages = preallocate_image_memory(alloc, avail_normal);
	if (pages < alloc) {
		/* We have exhausted non-highmem pages, try highmem. */
		alloc -= pages;
		pages += pages_highmem;
		pages_highmem = preallocate_image_highmem(alloc);
		if (pages_highmem < alloc)
			goto err_out;
		pages += pages_highmem;
		/*
		 * size is the desired number of saveable pages to leave in
		 * memory, so try to preallocate (all memory - size) pages.
		 */
		alloc = (count - pages) - size;
		pages += preallocate_image_highmem(alloc);
	} else {
		/*
		 * There are approximately max_size saveable pages at this point
		 * and we want to reduce this number down to size.
		 */
		alloc = max_size - size;
		size = preallocate_highmem_fraction(alloc, highmem, count);
		pages_highmem += size;
		alloc -= size;
		size = preallocate_image_memory(alloc, avail_normal);
		pages_highmem += preallocate_image_highmem(alloc - size);
		pages += pages_highmem + size;
	}

	/*
	 * We only need as many page frames for the image as there are saveable
	 * pages in memory, but we have allocated more. Release the excessive
	 * ones now.
	 */
	pages -= free_unnecessary_pages();

 out:
	stop = ktime_get();
	pr_cont("done (allocated %lu pages)\n", pages);
	swsusp_show_speed(start, stop, pages, "Allocated");

	return 0;

 err_out:
	pr_cont("\n");
	swsusp_free();
	return -ENOMEM;
}

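/*
 * Worked example of the formula above (hypothetical round numbers, for
 * illustration only): with count = 1000000 usable page frames, 2000
 * metadata pages, PAGES_FOR_IO assumed to come to 1024 pages and
 * reserved_size covering 256 pages,
 *
 *	max_size = (1000000 - (2000 + 1024)) / 2 - 2 * 256
 *	         = 498488 - 512 = 497976 pages
 *
 * i.e. a little under half of the usable page frames may be claimed by
 * the image, which is the intent of the "/ 2" in the formula.
 */
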
#ifdef CONFIG_HIGHMEM
/**
 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
 *
 * Compute the number of non-highmem pages that will be necessary for creating
 * copies of highmem pages.
 */
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * enough_free_mem - Check if there is enough free memory for the image.
 */
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = alloc_normal;

	for_each_populated_zone(zone)
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
		 nr_pages, PAGES_FOR_IO, free);

	return free > nr_pages + PAGES_FOR_IO;
}

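/*
 * Illustrative reading of the check above (hypothetical numbers): with
 * nr_pages = 50000 data pages, nr_highmem = 0 and PAGES_FOR_IO assumed
 * to come to 1024 pages, the image can only be created if more than
 * 51024 normal page frames are free or already preallocated
 * (alloc_normal).
 */
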
#ifdef CONFIG_HIGHMEM
/**
 * get_highmem_buffer - Allocate a buffer for highmem pages.
 *
 * If there are some highmem pages in the hibernation image, we may need a
 * buffer to copy them and/or load their data.
 */
static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC, safe_needed);
	return buffer ? 0 : -ENOMEM;
}

/**
 * alloc_highmem_pages - Allocate some highmem pages for the image.
 *
 * Try to allocate as many pages as needed, but if the number of free highmem
 * pages is less than that, allocate them all.
 */
static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * swsusp_alloc - Allocate memory for hibernation image.
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system. If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * copy_data_pages() works.
 */
static int swsusp_alloc(struct memory_bitmap *copy_bm,
			unsigned int nr_pages, unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		if (get_highmem_buffer(PG_ANY))
			goto err_out;
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}

asmlinkage __visible int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	pr_info("Creating hibernation image:\n");

	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		pr_err("Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * During the allocation of the suspend pagedir, new cold pages may
	 * appear. Kill them.
	 */
	drain_local_pages(NULL);
	copy_data_pages(&copy_bm, &orig_bm);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This especially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */

	nr_pages += nr_highmem;
	nr_copy_pages = nr_pages;
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	pr_info("Hibernation image created (%d pages copied)\n", nr_pages);

	return 0;
}

#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	return 0;
}

static char *check_image_kernel(struct swsusp_info *info)
{
	if (info->version_code != LINUX_VERSION_CODE)
		return "kernel version";
	if (strcmp(info->uts.sysname, init_utsname()->sysname))
		return "system type";
	if (strcmp(info->uts.release, init_utsname()->release))
		return "kernel release";
	if (strcmp(info->uts.version, init_utsname()->version))
		return "version";
	if (strcmp(info->uts.machine, init_utsname()->machine))
		return "machine";
	return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

unsigned long snapshot_get_image_size(void)
{
	return nr_copy_pages + nr_meta_pages + 1;
}

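/*
 * Worked example of the image size arithmetic (hypothetical numbers,
 * assuming 4 KB pages and 8-byte PFN entries): for nr_copy_pages = 100000,
 * nr_meta_pages = DIV_ROUND_UP(100000 * 8, 4096) = 196, so
 * snapshot_get_image_size() returns 100000 + 196 + 1 = 100197 pages and
 * init_header() below reports info->size = 100197 << PAGE_SHIFT, which is
 * about 391 MiB.
 */
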
static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = get_num_physpages();
	info->image_pages = nr_copy_pages;
	info->pages = snapshot_get_image_size();
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
	return init_header_complete(info);
}

/**
 * pack_pfns - Prepare PFNs for saving.
 * @buf: Memory buffer to store the PFNs in.
 * @bm: Memory bitmap.
 *
 * PFNs corresponding to set bits in @bm are stored in the area of memory
 * pointed to by @buf (1 page at a time).
 */
static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
		/* Save page key for data page (s390 only). */
		page_key_read(buf + j);
	}
}

/**
 * snapshot_read_next - Get the address to read the next image page from.
 * @handle: Snapshot handle to be used for the reading.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure. The structure gets populated then and a pointer to it should be
 * passed to this function on each subsequent call.
 *
 * On success, the function returns a positive number. Then, the caller
 * is allowed to read up to the returned number of bytes from the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the end of the data stream condition,
 * and negative numbers are returned on errors. If that happens, the structure
 * pointed to by @handle is not updated and should not be used any more.
 */
int snapshot_read_next(struct snapshot_handle *handle)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->cur) {
		int error;

		error = init_header((struct swsusp_info *)buffer);
		if (error)
			return error;
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	} else if (handle->cur <= nr_meta_pages) {
		clear_page(buffer);
		pack_pfns(buffer, &orig_bm);
	} else {
		struct page *page;

		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
		if (PageHighMem(page)) {
			/*
			 * Highmem pages are copied to the buffer,
			 * because we can't return with a kmapped
			 * highmem page (we may not be called again).
			 */
			void *kaddr;

			kaddr = kmap_atomic(page);
			copy_page(buffer, kaddr);
			kunmap_atomic(kaddr);
			handle->buffer = buffer;
		} else {
			handle->buffer = page_address(page);
		}
	}
	handle->cur++;
	return PAGE_SIZE;
}

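/*
 * Illustrative (hypothetical) consumer of snapshot_read_next(), sketched
 * from the contract documented above; write_page() stands in for whatever
 * sink the caller actually uses:
 *
 *	struct snapshot_handle handle;
 *	int n;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((n = snapshot_read_next(&handle)) > 0) {
 *		if (write_page(data_of(handle), n))
 *			break;
 *	}
 *
 * (n == 0 signals the end of the data stream, n < 0 is an error code.)
 */
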
static void duplicate_memory_bitmap(struct memory_bitmap *dst,
				    struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}

/**
 * mark_unsafe_pages - Mark pages that were used before hibernation.
 *
 * Mark the pages that cannot be used for storing the image during restoration,
 * because they conflict with the pages that had been used before hibernation.
 */
static void mark_unsafe_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;

	/* Clear the "free"/"unsafe" bit for all PFNs */
	memory_bm_position_reset(free_pages_map);
	pfn = memory_bm_next_pfn(free_pages_map);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_clear_current(free_pages_map);
		pfn = memory_bm_next_pfn(free_pages_map);
	}

	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
	duplicate_memory_bitmap(free_pages_map, bm);

	allocated_unsafe_pages = 0;
}

static int check_header(struct swsusp_info *info)
{
	char *reason;

	reason = check_image_kernel(info);
	if (!reason && info->num_physpages != get_num_physpages())
		reason = "memory size";
	if (reason) {
		pr_err("Image mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/**
 * load_header - Check the image header and copy the data from it.
 */
static int load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}

/**
 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
 * @buf: Area of memory containing the PFNs.
 * @bm: Memory bitmap.
 *
 * For each element of the array pointed to by @buf (1 page at a time), set the
 * corresponding bit in @bm.
 */
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		/* Extract and buffer page key for data page (s390 only). */
		page_key_memorize(buf + j);

		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
			memory_bm_set_bit(bm, buf[j]);
		else
			return -EFAULT;
	}

	return 0;
}

#ifdef CONFIG_HIGHMEM
/*
 * struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the page
 * frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;
};

/*
 * List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;

/**
 * count_highmem_image_pages - Compute the number of highmem pages in the image.
 * @bm: Memory bitmap.
 *
 * The bits in @bm that correspond to image pages are assumed to be set.
 */
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int cnt = 0;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (PageHighMem(pfn_to_page(pfn)))
			cnt++;

		pfn = memory_bm_next_pfn(bm);
	}
	return cnt;
}

static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;

/**
 * prepare_highmem_image - Allocate memory for loading highmem data from image.
 * @bm: Pointer to an uninitialized memory bitmap structure.
 * @nr_highmem_p: Pointer to the number of highmem image pages.
 *
 * Try to allocate as many highmem pages as there are highmem image pages
 * (@nr_highmem_p points to the variable containing the number of highmem image
 * pages). The pages that are "safe" (ie. will not be overwritten when the
 * hibernation image is restored entirely) have the corresponding bits set in
 * @bm (it must be uninitialized).
 *
 * NOTE: This function should not be called if there are no highmem image pages.
 */
static int prepare_highmem_image(struct memory_bitmap *bm,
				 unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}

static struct page *last_highmem_page;

/**
 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
 *
 * For a given highmem image page, get a buffer that suspend_write_next()
 * should return to its caller to write to.
 *
 * If the page is to be saved to its "original" page frame or a copy of
 * the page is to be made in highmem, @buffer is returned. Otherwise,
 * the copy of the page is to be made in normal memory, so the address of
 * the copy is returned.
 *
 * If @buffer is returned, the caller of suspend_write_next() will write
 * the page's contents to @buffer, so they will have to be copied to the
 * right location on the next call to suspend_write_next() and it is done
 * with the help of copy_last_highmem_page(). For this purpose, if
 * @buffer is returned, @last_highmem_page is set to the page to which
 * the data will have to be copied from @buffer.
 */
static void *get_highmem_page_buffer(struct page *page,
				     struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}

/**
 * copy_last_highmem_page - Copy the most recent highmem image page.
 *
 * Copy the contents of a highmem image from @buffer, where the caller of
 * snapshot_write_next() has stored them, to the right location represented by
 * @last_highmem_page.
 */
static void copy_last_highmem_page(void)
{
	if (last_highmem_page) {
		void *dst;

		dst = kmap_atomic(last_highmem_page);
		copy_page(dst, buffer);
		kunmap_atomic(dst);
		last_highmem_page = NULL;
	}
}

static inline int last_highmem_page_copied(void)
{
	return !last_highmem_page;
}

static inline void free_highmem_data(void)
{
	if (safe_highmem_bm)
		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

	if (buffer)
		free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int prepare_highmem_image(struct memory_bitmap *bm,
					unsigned int *nr_highmem_p) { return 0; }

static inline void *get_highmem_page_buffer(struct page *page,
					    struct chain_allocator *ca)
{
	return ERR_PTR(-EINVAL);
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */

#define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))

/**
 * prepare_image - Make room for loading hibernation image.
 * @new_bm: Uninitialized memory bitmap structure.
 * @bm: Memory bitmap with unsafe pages marked.
 *
 * Use @bm to mark the pages that will be overwritten in the process of
 * restoring the system memory state from the suspend image ("unsafe" pages)
 * and allocate memory for the image.
 *
 * The idea is to allocate a new memory bitmap first and then allocate
 * as many pages as needed for image data, but without specifying what those
 * pages will be used for just yet. Instead, we mark them all as allocated and
 * create a list of "safe" pages to be used later. On systems with high
 * memory a list of "safe" highmem pages is created too.
 */
static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages, nr_highmem;
	struct linked_page *lp;
	int error;

	/* If there is no highmem, the buffer will not be necessary */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	mark_unsafe_pages(bm);

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	if (nr_highmem > 0) {
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}
	/*
	 * Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer(). It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 *
	 * Also, nr_copy_pages cannot be less than allocated_unsafe_pages.
	 */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = safe_pages_list;
		safe_pages_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!swsusp_page_is_free(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(virt_to_page(lp));
		swsusp_set_page_free(virt_to_page(lp));
		nr_pages--;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}

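/*
 * Illustrative accounting for the reservation above (hypothetical numbers,
 * assuming a 4 KB page and a three-pointer struct pbe on 64-bit, i.e.
 * roughly 170 PBEs per linked page): with nr_copy_pages = 100000,
 * nr_highmem = 0 and allocated_unsafe_pages = 5000, the first loop sets
 * aside DIV_ROUND_UP(95000, 170) = 559 linked pages for future PBE chains,
 * and the second loop preallocates the remaining 95000 page frames for
 * image data.
 */
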
/**
 * get_buffer - Get the address to store the next image data page.
 *
 * Get the address that snapshot_write_next() should return to its caller to
 * write to.
 */
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page;
	unsigned long pfn = memory_bm_next_pfn(bm);

	if (pfn == BM_END_OF_MAP)
		return ERR_PTR(-EFAULT);

	page = pfn_to_page(pfn);
	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_address = page_address(page);
	pbe->address = safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}

/**
 * snapshot_write_next - Get the address to store the next image page.
 * @handle: Snapshot handle structure to guide the writing.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure. The structure gets populated then and a pointer to it should be
 * passed to this function on each subsequent call.
 *
 * On success, the function returns a positive number. Then, the caller
 * is allowed to write up to the returned number of bytes to the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the "end of file" condition. Negative
 * numbers are returned on errors, in which case the structure pointed to by
 * @handle is not updated and should not be used any more.
 */
int snapshot_write_next(struct snapshot_handle *handle)
{
	static struct chain_allocator ca;
	int error = 0;

	/* Check if we have already loaded the entire image */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	handle->sync_read = 1;

	if (!handle->cur) {
		if (!buffer)
			/* This makes the buffer be freed by swsusp_free() */
			buffer = get_image_page(GFP_ATOMIC, PG_ANY);

		if (!buffer)
			return -ENOMEM;

		handle->buffer = buffer;
	} else if (handle->cur == 1) {
		error = load_header(buffer);
		if (error)
			return error;

		safe_pages_list = NULL;

		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
		if (error)
			return error;

		/* Allocate buffer for page keys. */
		error = page_key_alloc(nr_copy_pages);
		if (error)
			return error;

		hibernate_restore_protection_begin();
	} else if (handle->cur <= nr_meta_pages + 1) {
		error = unpack_orig_pfns(buffer, &copy_bm);
		if (error)
			return error;

		if (handle->cur == nr_meta_pages + 1) {
			error = prepare_image(&orig_bm, &copy_bm);
			if (error)
				return error;

			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
			memory_bm_position_reset(&orig_bm);
			restore_pblist = NULL;
			handle->buffer = get_buffer(&orig_bm, &ca);
			handle->sync_read = 0;
			if (IS_ERR(handle->buffer))
				return PTR_ERR(handle->buffer);
		}
	} else {
		copy_last_highmem_page();
		/* Restore page key for data page (s390 only). */
		page_key_write(handle->buffer);
		hibernate_restore_protect_page(handle->buffer);
		handle->buffer = get_buffer(&orig_bm, &ca);
		if (IS_ERR(handle->buffer))
			return PTR_ERR(handle->buffer);
		if (handle->buffer != buffer)
			handle->sync_read = 0;
	}
	handle->cur++;
	return PAGE_SIZE;
}

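/*
 * Illustrative (hypothetical) producer loop feeding snapshot_write_next(),
 * sketched from the contract documented above; read_page() stands in for
 * the caller's data source:
 *
 *	struct snapshot_handle handle;
 *	int n;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((n = snapshot_write_next(&handle)) > 0) {
 *		if (read_page(data_of(handle), n))
 *			break;
 *	}
 *	snapshot_write_finalize(&handle);
 *
 * (n == 0 signals "end of file", n < 0 is an error code.)
 */
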
/**
 * snapshot_write_finalize - Complete the loading of a hibernation image.
 *
 * Must be called after the last call to snapshot_write_next() in case the last
 * page in the image happens to be a highmem page and its contents should be
 * stored in highmem. Additionally, it recycles bitmap memory that's not
 * necessary any more.
 */
void snapshot_write_finalize(struct snapshot_handle *handle)
{
	copy_last_highmem_page();
	/* Restore page key for data page (s390 only). */
	page_key_write(handle->buffer);
	page_key_free();
	hibernate_restore_protect_page(handle->buffer);
	/* Do that only if we have loaded the image entirely */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
		memory_bm_recycle(&orig_bm);
		free_highmem_data();
	}
}

int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!nr_copy_pages || !last_highmem_page_copied() ||
		 handle->cur <= nr_meta_pages + nr_copy_pages);
}

#ifdef CONFIG_HIGHMEM
/* Assumes that @buf is ready and points to a "safe" page */
static inline void swap_two_pages_data(struct page *p1, struct page *p2,
				       void *buf)
{
	void *kaddr1, *kaddr2;

	kaddr1 = kmap_atomic(p1);
	kaddr2 = kmap_atomic(p2);
	copy_page(buf, kaddr1);
	copy_page(kaddr1, kaddr2);
	copy_page(kaddr2, buf);
	kunmap_atomic(kaddr2);
	kunmap_atomic(kaddr1);
}

/**
 * restore_highmem - Put highmem image pages into their original locations.
 *
 * For each highmem page that was in use before hibernation and is included in
 * the image, and also has been allocated by the "restore" kernel, swap its
 * current contents with the previous (ie. "before hibernation") ones.
 *
 * If the restore eventually fails, we can call this function once again and
 * restore the highmem state as seen by the restore kernel.
 */
int restore_highmem(void)
{
	struct highmem_pbe *pbe = highmem_pblist;
	void *buf;

	if (!pbe)
		return 0;

	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
	if (!buf)
		return -ENOMEM;

	while (pbe) {
		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
		pbe = pbe->next;
	}
	free_image_page(buf, PG_UNSAFE_CLEAR);
	return 0;
}
#endif /* CONFIG_HIGHMEM */