PM / Hibernate: Do not initialize static and extern variables to 0
[linux-block.git] kernel/power/swap.c
/*
 * linux/kernel/power/swap.c
 *
 * This file provides functions for reading the suspend image from
 * and writing it to a swap partition.
 *
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com>
 *
 * This file is released under the GPLv2.
 *
 */

#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/genhd.h>
#include <linux/device.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>

#include "power.h"

#define HIBERNATE_SIG	"S1SUSPEND"

/*
 * The swap map is a data structure used for keeping track of each page
 * written to a swap partition.  It consists of many swap_map_page
 * structures that each contain an array of MAP_PAGE_ENTRIES swap entries.
 * These structures are stored in the swap area and linked together with
 * the help of the .next_swap member.
 *
 * The swap map is created during suspend.  The swap map pages are
 * allocated and populated one at a time, so we only need one memory
 * page to set up the entire structure.
 *
 * During resume we also only need to use one swap_map_page structure
 * at a time.
 */

#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)

struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];
	sector_t next_swap;
};
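
/*
 * Sizing note (illustrative; assumes 4 KiB pages and an 8-byte sector_t):
 * MAP_PAGE_ENTRIES = 4096 / 8 - 1 = 511, so one swap_map_page indexes 511
 * image pages (just under 2 MiB of data) before the chain continues through
 * .next_swap.
 */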

/**
 * The swap_map_handle structure is used for handling swap in
 * a file-like way.
 */

struct swap_map_handle {
	struct swap_map_page *cur;
	sector_t cur_swap;
	sector_t first_sector;
	unsigned int k;
};

struct swsusp_header {
	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int)];
	sector_t image;
	unsigned int flags;	/* Flags to pass to the "boot" kernel */
	char	orig_sig[10];
	char	sig[10];
} __attribute__((packed));
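
/*
 * Layout note: reserved[] pads the structure to exactly PAGE_SIZE (the 20
 * accounts for the two 10-byte signature arrays below), so sig[] ends up in
 * the last 10 bytes of the swap area's first page -- where the
 * "SWAP-SPACE"/"SWAPSPACE2" magic of an ordinary swap partition lives.
 * mark_swapfiles() saves that magic in orig_sig[] and replaces it with
 * HIBERNATE_SIG; swsusp_check() restores it during resume.
 */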

static struct swsusp_header *swsusp_header;

/**
 * The following functions are used for tracking the allocated
 * swap pages, so that they can be freed in case of an error.
 */

struct swsusp_extent {
	struct rb_node node;
	unsigned long start;
	unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;

static int swsusp_extents_insert(unsigned long swap_offset)
{
	struct rb_node **new = &(swsusp_extents.rb_node);
	struct rb_node *parent = NULL;
	struct swsusp_extent *ext;

	/* Figure out where to put the new node */
	while (*new) {
		ext = container_of(*new, struct swsusp_extent, node);
		parent = *new;
		if (swap_offset < ext->start) {
			/* Try to merge */
			if (swap_offset == ext->start - 1) {
				ext->start--;
				return 0;
			}
			new = &((*new)->rb_left);
		} else if (swap_offset > ext->end) {
			/* Try to merge */
			if (swap_offset == ext->end + 1) {
				ext->end++;
				return 0;
			}
			new = &((*new)->rb_right);
		} else {
			/* It already is in the tree */
			return -EINVAL;
		}
	}
	/* Add the new node and rebalance the tree. */
	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
	if (!ext)
		return -ENOMEM;

	ext->start = swap_offset;
	ext->end = swap_offset;
	rb_link_node(&ext->node, parent, new);
	rb_insert_color(&ext->node, &swsusp_extents);
	return 0;
}

/**
 * alloc_swapdev_block - allocate a swap page and register that it has
 * been allocated, so that it can be freed in case of an error.
 */

sector_t alloc_swapdev_block(int swap)
{
	unsigned long offset;

	offset = swp_offset(get_swap_page_of_type(swap));
	if (offset) {
		if (swsusp_extents_insert(offset))
			swap_free(swp_entry(swap, offset));
		else
			return swapdev_block(swap, offset);
	}
	return 0;
}

/**
 * free_all_swap_pages - free swap pages allocated for saving image data.
 * It also frees the extents used to register which swap entries had been
 * allocated.
 */

void free_all_swap_pages(int swap)
{
	struct rb_node *node;

	while ((node = swsusp_extents.rb_node)) {
		struct swsusp_extent *ext;
		unsigned long offset;

		ext = container_of(node, struct swsusp_extent, node);
		rb_erase(node, &swsusp_extents);
		for (offset = ext->start; offset <= ext->end; offset++)
			swap_free(swp_entry(swap, offset));

		kfree(ext);
	}
}

int swsusp_swap_in_use(void)
{
	return (swsusp_extents.rb_node != NULL);
}

/*
 * General things
 */

static unsigned short root_swap = 0xffff;
struct block_device *hib_resume_bdev;

/*
 * Saving part
 */
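
/*
 * mark_swapfiles - stamp the resume swap area with the hibernation signature.
 *
 * Reads the first page of the resume swap area, preserves the existing swap
 * signature in orig_sig, overwrites sig with HIBERNATE_SIG and records the
 * sector of the first swap map page together with the image flags, so that
 * the "boot" kernel can locate and read the image on resume.
 */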
static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
	int error;

	hib_bio_read_page(swsusp_resume_block, swsusp_header, NULL);
	if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
	    !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
		memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
		swsusp_header->image = handle->first_sector;
		swsusp_header->flags = flags;
		error = hib_bio_write_page(swsusp_resume_block,
					swsusp_header, NULL);
	} else {
		printk(KERN_ERR "PM: Swap header not found!\n");
		error = -ENODEV;
	}
	return error;
}

/**
 * swsusp_swap_check - check if the resume device is a swap device
 * and get its index (if so)
 *
 * This is called before saving the image.
 */
static int swsusp_swap_check(void)
{
	int res;

	res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
			&hib_resume_bdev);
	if (res < 0)
		return res;

	root_swap = res;
	res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
	if (res)
		return res;

	res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
	if (res < 0)
		blkdev_put(hib_resume_bdev, FMODE_WRITE);

	return res;
}

/**
 * write_page - Write one page to given swap location.
 * @buf:	Address we're writing.
 * @offset:	Offset of the swap page we're writing to.
 * @bio_chain:	Link the next write BIO here.
 */

static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
{
	void *src;

	if (!offset)
		return -ENOSPC;

	if (bio_chain) {
		src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
		if (src) {
			copy_page(src, buf);
		} else {
			WARN_ON_ONCE(1);
			bio_chain = NULL;	/* Go synchronous */
			src = buf;
		}
	} else {
		src = buf;
	}
	return hib_bio_write_page(offset, src, bio_chain);
}

static void release_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur)
		free_page((unsigned long)handle->cur);
	handle->cur = NULL;
}

static int get_swap_writer(struct swap_map_handle *handle)
{
	int ret;

	ret = swsusp_swap_check();
	if (ret) {
		if (ret != -ENOSPC)
			printk(KERN_ERR "PM: Cannot find swap device, try "
					"swapon -a.\n");
		return ret;
	}
	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
	if (!handle->cur) {
		ret = -ENOMEM;
		goto err_close;
	}
	handle->cur_swap = alloc_swapdev_block(root_swap);
	if (!handle->cur_swap) {
		ret = -ENOSPC;
		goto err_rel;
	}
	handle->k = 0;
	handle->first_sector = handle->cur_swap;
	return 0;
err_rel:
	release_swap_writer(handle);
err_close:
	swsusp_close(FMODE_WRITE);
	return ret;
}
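
/*
 * swap_write_page() records the swap sector of each data page in the current
 * swap map page.  Once all MAP_PAGE_ENTRIES slots are filled, the sector of
 * the next map page is linked in through .next_swap, the full map page is
 * written out synchronously, and the in-memory page is cleared and reused
 * for the next batch of entries.
 */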
static int swap_write_page(struct swap_map_handle *handle, void *buf,
				struct bio **bio_chain)
{
	int error = 0;
	sector_t offset;

	if (!handle->cur)
		return -EINVAL;
	offset = alloc_swapdev_block(root_swap);
	error = write_page(buf, offset, bio_chain);
	if (error)
		return error;
	handle->cur->entries[handle->k++] = offset;
	if (handle->k >= MAP_PAGE_ENTRIES) {
		error = hib_wait_on_bio_chain(bio_chain);
		if (error)
			goto out;
		offset = alloc_swapdev_block(root_swap);
		if (!offset)
			return -ENOSPC;
		handle->cur->next_swap = offset;
		error = write_page(handle->cur, handle->cur_swap, NULL);
		if (error)
			goto out;
		clear_page(handle->cur);
		handle->cur_swap = offset;
		handle->k = 0;
	}
 out:
	return error;
}

static int flush_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur && handle->cur_swap)
		return write_page(handle->cur, handle->cur_swap, NULL);
	else
		return -EINVAL;
}

static int swap_writer_finish(struct swap_map_handle *handle,
		unsigned int flags, int error)
{
	if (!error) {
		flush_swap_writer(handle);
		printk(KERN_INFO "PM: S");
		error = mark_swapfiles(handle, flags);
		printk("|\n");
	}

	if (error)
		free_all_swap_pages(root_swap);
	release_swap_writer(handle);
	swsusp_close(FMODE_WRITE);

	return error;
}

/* We need to remember how much compressed data we need to read. */
#define LZO_HEADER	sizeof(size_t)

/* Number of pages/bytes we'll compress at one time. */
#define LZO_UNC_PAGES	32
#define LZO_UNC_SIZE	(LZO_UNC_PAGES * PAGE_SIZE)

/* Number of pages/bytes we need for compressed data (worst case). */
#define LZO_CMP_PAGES	DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
				     LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE	(LZO_CMP_PAGES * PAGE_SIZE)
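
/*
 * Worked example (illustrative; assumes 4 KiB pages and an 8-byte size_t):
 * LZO_UNC_SIZE is 32 * 4096 bytes = 128 KiB per compression batch.  With the
 * kernel's lzo1x_worst_compress(x) = x + x/16 + 64 + 3, the worst case plus
 * LZO_HEADER is 139339 bytes, so LZO_CMP_PAGES rounds up to 35 pages and
 * LZO_CMP_SIZE to 140 KiB.
 */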

/**
 * save_image - save the suspend image data
 */

static int save_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_write)
{
	unsigned int m;
	int ret;
	int nr_pages;
	int err2;
	struct bio *bio;
	struct timeval start;
	struct timeval stop;

	printk(KERN_INFO "PM: Saving image data pages (%u pages) ... ",
		nr_to_write);
	m = nr_to_write / 100;
	if (!m)
		m = 1;
	nr_pages = 0;
	bio = NULL;
	do_gettimeofday(&start);
	while (1) {
		ret = snapshot_read_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_write_page(handle, data_of(*snapshot), &bio);
		if (ret)
			break;
		if (!(nr_pages % m))
			printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m);
		nr_pages++;
	}
	err2 = hib_wait_on_bio_chain(&bio);
	do_gettimeofday(&stop);
	if (!ret)
		ret = err2;
	if (!ret)
		printk(KERN_CONT "\b\b\b\bdone\n");
	else
		printk(KERN_CONT "\n");
	swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
	return ret;
}

/**
 * save_image_lzo - Save the suspend image data compressed with LZO.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 */
static int save_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_write)
{
	unsigned int m;
	int ret = 0;
	int nr_pages;
	int err2;
	struct bio *bio;
	struct timeval start;
	struct timeval stop;
	size_t off, unc_len, cmp_len;
	unsigned char *unc, *cmp, *wrk, *page;

	page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
	if (!page) {
		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
		return -ENOMEM;
	}

	wrk = vmalloc(LZO1X_1_MEM_COMPRESS);
	if (!wrk) {
		printk(KERN_ERR "PM: Failed to allocate LZO workspace\n");
		free_page((unsigned long)page);
		return -ENOMEM;
	}

	unc = vmalloc(LZO_UNC_SIZE);
	if (!unc) {
		printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n");
		vfree(wrk);
		free_page((unsigned long)page);
		return -ENOMEM;
	}

	cmp = vmalloc(LZO_CMP_SIZE);
	if (!cmp) {
		printk(KERN_ERR "PM: Failed to allocate LZO compressed\n");
		vfree(unc);
		vfree(wrk);
		free_page((unsigned long)page);
		return -ENOMEM;
	}

	printk(KERN_INFO
		"PM: Compressing and saving image data (%u pages) ... ",
		nr_to_write);
	m = nr_to_write / 100;
	if (!m)
		m = 1;
	nr_pages = 0;
	bio = NULL;
	do_gettimeofday(&start);
	for (;;) {
		for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
			ret = snapshot_read_next(snapshot);
			if (ret < 0)
				goto out_finish;

			if (!ret)
				break;

			memcpy(unc + off, data_of(*snapshot), PAGE_SIZE);

			if (!(nr_pages % m))
				printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m);
			nr_pages++;
		}

		if (!off)
			break;

		unc_len = off;
		ret = lzo1x_1_compress(unc, unc_len,
		                       cmp + LZO_HEADER, &cmp_len, wrk);
		if (ret < 0) {
			printk(KERN_ERR "PM: LZO compression failed\n");
			break;
		}

		if (unlikely(!cmp_len ||
		             cmp_len > lzo1x_worst_compress(unc_len))) {
			printk(KERN_ERR "PM: Invalid LZO compressed length\n");
			ret = -1;
			break;
		}

		*(size_t *)cmp = cmp_len;

		/*
		 * Given we are writing one page at a time to disk, we copy
		 * that much from the buffer, although the last bit will likely
		 * be smaller than a full page.  This is OK - we saved the
		 * length of the compressed data, so any garbage at the end
		 * will be discarded when we read it.
		 */
		for (off = 0; off < LZO_HEADER + cmp_len; off += PAGE_SIZE) {
			memcpy(page, cmp + off, PAGE_SIZE);

			ret = swap_write_page(handle, page, &bio);
			if (ret)
				goto out_finish;
		}
	}

out_finish:
	err2 = hib_wait_on_bio_chain(&bio);
	do_gettimeofday(&stop);
	if (!ret)
		ret = err2;
	if (!ret)
		printk(KERN_CONT "\b\b\b\bdone\n");
	else
		printk(KERN_CONT "\n");
	swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");

	vfree(cmp);
	vfree(unc);
	vfree(wrk);
	free_page((unsigned long)page);

	return ret;
}

/**
 * enough_swap - Make sure we have enough swap to save the image.
 *
 * Returns TRUE or FALSE after checking the total amount of swap
 * space available from the resume partition.
 */

static int enough_swap(unsigned int nr_pages, unsigned int flags)
{
	unsigned int free_swap = count_swap_pages(root_swap, 1);
	unsigned int required;

	pr_debug("PM: Free swap pages: %u\n", free_swap);

	required = PAGES_FOR_IO + ((flags & SF_NOCOMPRESS_MODE) ?
		nr_pages : (nr_pages * LZO_CMP_PAGES) / LZO_UNC_PAGES + 1);
	return free_swap > required;
}

/**
 * swsusp_write - Write entire image and metadata.
 * @flags: flags to pass to the "boot" kernel in the image header
 *
 * It is important _NOT_ to umount filesystems at this point. We want
 * them synced (in case something goes wrong) but we DO NOT want to mark
 * filesystem clean: it is not. (And it does not matter, if we resume
 * correctly, we'll mark system clean, anyway.)
 */

int swsusp_write(unsigned int flags)
{
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;
	unsigned long pages;
	int error;

	pages = snapshot_get_image_size();
	error = get_swap_writer(&handle);
	if (error) {
		printk(KERN_ERR "PM: Cannot get swap writer\n");
		return error;
	}
	if (!enough_swap(pages, flags)) {
		printk(KERN_ERR "PM: Not enough free swap\n");
		error = -ENOSPC;
		goto out_finish;
	}
	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_read_next(&snapshot);
	if (error < PAGE_SIZE) {
		if (error >= 0)
			error = -EFAULT;

		goto out_finish;
	}
	header = (struct swsusp_info *)data_of(snapshot);
	error = swap_write_page(&handle, header, NULL);
	if (!error) {
		error = (flags & SF_NOCOMPRESS_MODE) ?
			save_image(&handle, &snapshot, pages - 1) :
			save_image_lzo(&handle, &snapshot, pages - 1);
	}
out_finish:
	error = swap_writer_finish(&handle, flags, error);
	return error;
}

/**
 * The following functions allow us to read data using a swap map
 * in a file-like way.
 */

static void release_swap_reader(struct swap_map_handle *handle)
{
	if (handle->cur)
		free_page((unsigned long)handle->cur);
	handle->cur = NULL;
}

static int get_swap_reader(struct swap_map_handle *handle,
		unsigned int *flags_p)
{
	int error;

	*flags_p = swsusp_header->flags;

	if (!swsusp_header->image) /* how can this happen? */
		return -EINVAL;

	handle->cur = (struct swap_map_page *)get_zeroed_page(__GFP_WAIT | __GFP_HIGH);
	if (!handle->cur)
		return -ENOMEM;

	error = hib_bio_read_page(swsusp_header->image, handle->cur, NULL);
	if (error) {
		release_swap_reader(handle);
		return error;
	}
	handle->k = 0;
	return 0;
}

static int swap_read_page(struct swap_map_handle *handle, void *buf,
		struct bio **bio_chain)
{
	sector_t offset;
	int error;

	if (!handle->cur)
		return -EINVAL;
	offset = handle->cur->entries[handle->k];
	if (!offset)
		return -EFAULT;
	error = hib_bio_read_page(offset, buf, bio_chain);
	if (error)
		return error;
	if (++handle->k >= MAP_PAGE_ENTRIES) {
		error = hib_wait_on_bio_chain(bio_chain);
		handle->k = 0;
		offset = handle->cur->next_swap;
		if (!offset)
			release_swap_reader(handle);
		else if (!error)
			error = hib_bio_read_page(offset, handle->cur, NULL);
	}
	return error;
}

static int swap_reader_finish(struct swap_map_handle *handle)
{
	release_swap_reader(handle);

	return 0;
}

/**
 * load_image - load the image using the swap map handle
 * @handle and the snapshot handle @snapshot
 * (assume there are @nr_to_read pages to load)
 */

static int load_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_read)
{
	unsigned int m;
	int error = 0;
	struct timeval start;
	struct timeval stop;
	struct bio *bio;
	int err2;
	unsigned nr_pages;

	printk(KERN_INFO "PM: Loading image data pages (%u pages) ... ",
		nr_to_read);
	m = nr_to_read / 100;
	if (!m)
		m = 1;
	nr_pages = 0;
	bio = NULL;
	do_gettimeofday(&start);
	for ( ; ; ) {
		error = snapshot_write_next(snapshot);
		if (error <= 0)
			break;
		error = swap_read_page(handle, data_of(*snapshot), &bio);
		if (error)
			break;
		if (snapshot->sync_read)
			error = hib_wait_on_bio_chain(&bio);
		if (error)
			break;
		if (!(nr_pages % m))
			printk("\b\b\b\b%3d%%", nr_pages / m);
		nr_pages++;
	}
	err2 = hib_wait_on_bio_chain(&bio);
	do_gettimeofday(&stop);
	if (!error)
		error = err2;
	if (!error) {
		printk("\b\b\b\bdone\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			error = -ENODATA;
	} else
		printk("\n");
	swsusp_show_speed(&start, &stop, nr_to_read, "Read");
	return error;
}
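
/*
 * On the read side the compressed stream is consumed chunk by chunk: the
 * first page of each chunk is read synchronously to obtain the stored
 * compressed length, the remaining pages are queued on the bio chain, and
 * once the chain completes the pages are assembled into a contiguous buffer
 * and decompressed with lzo1x_decompress_safe().
 */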

/**
 * load_image_lzo - Load compressed image data and decompress it with LZO.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Image to copy uncompressed data into.
 * @nr_to_read: Number of pages to load.
 */
static int load_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_read)
{
	unsigned int m;
	int error = 0;
	struct bio *bio;
	struct timeval start;
	struct timeval stop;
	unsigned nr_pages;
	size_t i, off, unc_len, cmp_len;
	unsigned char *unc, *cmp, *page[LZO_CMP_PAGES];

	for (i = 0; i < LZO_CMP_PAGES; i++) {
		page[i] = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
		if (!page[i]) {
			printk(KERN_ERR "PM: Failed to allocate LZO page\n");

			while (i)
				free_page((unsigned long)page[--i]);

			return -ENOMEM;
		}
	}

	unc = vmalloc(LZO_UNC_SIZE);
	if (!unc) {
		printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n");

		for (i = 0; i < LZO_CMP_PAGES; i++)
			free_page((unsigned long)page[i]);

		return -ENOMEM;
	}

	cmp = vmalloc(LZO_CMP_SIZE);
	if (!cmp) {
		printk(KERN_ERR "PM: Failed to allocate LZO compressed\n");

		vfree(unc);
		for (i = 0; i < LZO_CMP_PAGES; i++)
			free_page((unsigned long)page[i]);

		return -ENOMEM;
	}

	printk(KERN_INFO
		"PM: Loading and decompressing image data (%u pages) ... ",
		nr_to_read);
	m = nr_to_read / 100;
	if (!m)
		m = 1;
	nr_pages = 0;
	bio = NULL;
	do_gettimeofday(&start);

	error = snapshot_write_next(snapshot);
	if (error <= 0)
		goto out_finish;

	for (;;) {
		error = swap_read_page(handle, page[0], NULL); /* sync */
		if (error)
			break;

		cmp_len = *(size_t *)page[0];
		if (unlikely(!cmp_len ||
		             cmp_len > lzo1x_worst_compress(LZO_UNC_SIZE))) {
			printk(KERN_ERR "PM: Invalid LZO compressed length\n");
			error = -1;
			break;
		}

		for (off = PAGE_SIZE, i = 1;
		     off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) {
			error = swap_read_page(handle, page[i], &bio);
			if (error)
				goto out_finish;
		}

		error = hib_wait_on_bio_chain(&bio); /* need all data now */
		if (error)
			goto out_finish;

		for (off = 0, i = 0;
		     off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) {
			memcpy(cmp + off, page[i], PAGE_SIZE);
		}

		unc_len = LZO_UNC_SIZE;
		error = lzo1x_decompress_safe(cmp + LZO_HEADER, cmp_len,
		                              unc, &unc_len);
		if (error < 0) {
			printk(KERN_ERR "PM: LZO decompression failed\n");
			break;
		}

		if (unlikely(!unc_len ||
		             unc_len > LZO_UNC_SIZE ||
		             unc_len & (PAGE_SIZE - 1))) {
			printk(KERN_ERR "PM: Invalid LZO uncompressed length\n");
			error = -1;
			break;
		}

		for (off = 0; off < unc_len; off += PAGE_SIZE) {
			memcpy(data_of(*snapshot), unc + off, PAGE_SIZE);

			if (!(nr_pages % m))
				printk("\b\b\b\b%3d%%", nr_pages / m);
			nr_pages++;

			error = snapshot_write_next(snapshot);
			if (error <= 0)
				goto out_finish;
		}
	}

out_finish:
	do_gettimeofday(&stop);
	if (!error) {
		printk("\b\b\b\bdone\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			error = -ENODATA;
	} else
		printk("\n");
	swsusp_show_speed(&start, &stop, nr_to_read, "Read");

	vfree(cmp);
	vfree(unc);
	for (i = 0; i < LZO_CMP_PAGES; i++)
		free_page((unsigned long)page[i]);

	return error;
}

/**
 * swsusp_read - read the hibernation image.
 * @flags_p: flags passed by the "frozen" kernel in the image header should
 *	     be written into this memory location
 */

int swsusp_read(unsigned int *flags_p)
{
	int error;
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;

	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_write_next(&snapshot);
	if (error < PAGE_SIZE)
		return error < 0 ? error : -EFAULT;
	header = (struct swsusp_info *)data_of(snapshot);
	error = get_swap_reader(&handle, flags_p);
	if (error)
		goto end;
	if (!error)
		error = swap_read_page(&handle, header, NULL);
	if (!error) {
		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
			load_image(&handle, &snapshot, header->pages - 1) :
			load_image_lzo(&handle, &snapshot, header->pages - 1);
	}
	swap_reader_finish(&handle);
end:
	if (!error)
		pr_debug("PM: Image successfully loaded\n");
	else
		pr_debug("PM: Error %d resuming\n", error);
	return error;
}

/**
 * swsusp_check - Check for swsusp signature in the resume device
 */

int swsusp_check(void)
{
	int error;

	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
					    FMODE_READ, NULL);
	if (!IS_ERR(hib_resume_bdev)) {
		set_blocksize(hib_resume_bdev, PAGE_SIZE);
		clear_page(swsusp_header);
		error = hib_bio_read_page(swsusp_resume_block,
					swsusp_header, NULL);
		if (error)
			goto put;

		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
			/* Reset swap signature now */
			error = hib_bio_write_page(swsusp_resume_block,
						swsusp_header, NULL);
		} else {
			error = -EINVAL;
		}

put:
		if (error)
			blkdev_put(hib_resume_bdev, FMODE_READ);
		else
			pr_debug("PM: Image signature found, resuming\n");
	} else {
		error = PTR_ERR(hib_resume_bdev);
	}

	if (error)
		pr_debug("PM: Image not found (code %d)\n", error);

	return error;
}

/**
 * swsusp_close - close swap device.
 */

void swsusp_close(fmode_t mode)
{
	if (IS_ERR(hib_resume_bdev)) {
		pr_debug("PM: Image device not initialised\n");
		return;
	}

	blkdev_put(hib_resume_bdev, mode);
}

static int swsusp_header_init(void)
{
	swsusp_header = (struct swsusp_header *)__get_free_page(GFP_KERNEL);
	if (!swsusp_header)
		panic("Could not allocate memory for swsusp_header\n");
	return 0;
}

core_initcall(swsusp_header_init);