1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * linux/kernel/power/swap.c
4  *
5  * This file provides functions for reading the suspend image from
6  * and writing it to a swap partition.
7  *
8  * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
9  * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
10  * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
11  */
12
13 #define pr_fmt(fmt) "PM: " fmt
14
15 #include <crypto/acompress.h>
16 #include <linux/module.h>
17 #include <linux/file.h>
18 #include <linux/delay.h>
19 #include <linux/bitops.h>
20 #include <linux/device.h>
21 #include <linux/bio.h>
22 #include <linux/blkdev.h>
23 #include <linux/swap.h>
24 #include <linux/swapops.h>
25 #include <linux/pm.h>
26 #include <linux/slab.h>
27 #include <linux/vmalloc.h>
28 #include <linux/cpumask.h>
29 #include <linux/atomic.h>
30 #include <linux/kthread.h>
31 #include <linux/crc32.h>
32 #include <linux/ktime.h>
33
34 #include "power.h"
35
36 #define HIBERNATE_SIG   "S1SUSPEND"
37
38 u32 swsusp_hardware_signature;
39
40 /*
41  * When reading an {un,}compressed image, we may restore pages in place,
42  * in which case some architectures need those pages to be cleaned before they
43  * can be executed. We don't know which pages these may be, so clean the lot.
44  */
45 static bool clean_pages_on_read;
46 static bool clean_pages_on_decompress;
47
48 /*
49  *      The swap map is a data structure used for keeping track of each page
50  *      written to a swap partition.  It consists of many swap_map_page
51  *      structures, each of which contains an array of MAP_PAGE_ENTRIES swap
52  *      entries.  These structures are stored in swap and linked together
53  *      with the help of the .next_swap member.
54  *
55  *      The swap map is created during suspend.  The swap map pages are
56  *      allocated and populated one at a time, so we only need one memory
57  *      page to set up the entire structure.
58  *
59  *      During resume we read all swap_map_page structures into a list.
60  */
61
62 #define MAP_PAGE_ENTRIES        (PAGE_SIZE / sizeof(sector_t) - 1)
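/*
 * For example, with 4 KiB pages and an 8-byte sector_t (a common 64-bit
 * configuration; both sizes are configuration dependent), MAP_PAGE_ENTRIES is
 * 4096 / 8 - 1 = 511: each swap_map_page holds the sectors of 511 data pages
 * plus the sector of the next map page.
 */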
63
64 /*
65  * Number of free pages that are not in high memory.
66  */
67 static inline unsigned long low_free_pages(void)
68 {
69         return nr_free_pages() - nr_free_highpages();
70 }
71
72 /*
73  * Number of pages required to be kept free while writing the image. Always
74  * half of all available low pages before the writing starts.
75  */
76 static inline unsigned long reqd_free_pages(void)
77 {
78         return low_free_pages() / 2;
79 }
80
81 struct swap_map_page {
82         sector_t entries[MAP_PAGE_ENTRIES];
83         sector_t next_swap;
84 };
85
86 struct swap_map_page_list {
87         struct swap_map_page *map;
88         struct swap_map_page_list *next;
89 };
90
91 /*
92  *      The swap_map_handle structure is used for handling swap in
93  *      a file-like way.
94  */
95
96 struct swap_map_handle {
97         struct swap_map_page *cur;
98         struct swap_map_page_list *maps;
99         sector_t cur_swap;
100         sector_t first_sector;
101         unsigned int k;
102         unsigned long reqd_free_pages;
103         u32 crc32;
104 };
105
106 struct swsusp_header {
107         char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
108                       sizeof(u32) - sizeof(u32)];
109         u32     hw_sig;
110         u32     crc32;
111         sector_t image;
112         unsigned int flags;     /* Flags to pass to the "boot" kernel */
113         char    orig_sig[10];
114         char    sig[10];
115 } __packed;
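
/*
 * Note that reserved[] pads swsusp_header to exactly one page, so sig[] and
 * orig_sig[] end up in the last bytes of the first page of the swap area,
 * where the normal swap signature ("SWAP-SPACE"/"SWAPSPACE2") lives.
 * mark_swapfiles() saves that signature in orig_sig and replaces it with
 * HIBERNATE_SIG; swsusp_check() and swsusp_unmark() put it back.
 */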
116
117 static struct swsusp_header *swsusp_header;
118
119 /*
120  *      The following functions are used for tracking the allocated
121  *      swap pages, so that they can be freed in case of an error.
122  */
123
124 struct swsusp_extent {
125         struct rb_node node;
126         unsigned long start;
127         unsigned long end;
128 };
129
130 static struct rb_root swsusp_extents = RB_ROOT;
131
132 static int swsusp_extents_insert(unsigned long swap_offset)
133 {
134         struct rb_node **new = &(swsusp_extents.rb_node);
135         struct rb_node *parent = NULL;
136         struct swsusp_extent *ext;
137
138         /* Figure out where to put the new node */
139         while (*new) {
140                 ext = rb_entry(*new, struct swsusp_extent, node);
141                 parent = *new;
142                 if (swap_offset < ext->start) {
143                         /* Try to merge */
144                         if (swap_offset == ext->start - 1) {
145                                 ext->start--;
146                                 return 0;
147                         }
148                         new = &((*new)->rb_left);
149                 } else if (swap_offset > ext->end) {
150                         /* Try to merge */
151                         if (swap_offset == ext->end + 1) {
152                                 ext->end++;
153                                 return 0;
154                         }
155                         new = &((*new)->rb_right);
156                 } else {
157                         /* It already is in the tree */
158                         return -EINVAL;
159                 }
160         }
161         /* Add the new node and rebalance the tree. */
162         ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
163         if (!ext)
164                 return -ENOMEM;
165
166         ext->start = swap_offset;
167         ext->end = swap_offset;
168         rb_link_node(&ext->node, parent, new);
169         rb_insert_color(&ext->node, &swsusp_extents);
170         return 0;
171 }
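
/*
 * For example, inserting offsets 100, 101 and 102 in that order grows a
 * single [100, 102] extent via the merge checks above, while inserting an
 * offset that is already covered returns -EINVAL.  Note that two existing
 * extents are never coalesced with each other; extents only grow one offset
 * at a time.
 */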
172
173 /*
174  *      alloc_swapdev_block - allocate a swap page and register that it has
175  *      been allocated, so that it can be freed in case of an error.
176  */
177
178 sector_t alloc_swapdev_block(int swap)
179 {
180         unsigned long offset;
181
182         offset = swp_offset(get_swap_page_of_type(swap));
183         if (offset) {
184                 if (swsusp_extents_insert(offset))
185                         swap_free(swp_entry(swap, offset));
186                 else
187                         return swapdev_block(swap, offset);
188         }
189         return 0;
190 }
191
192 /*
193  *      free_all_swap_pages - free swap pages allocated for saving image data.
194  *      It also frees the extents used to register which swap entries had been
195  *      allocated.
196  */
197
198 void free_all_swap_pages(int swap)
199 {
200         struct rb_node *node;
201
202         while ((node = swsusp_extents.rb_node)) {
203                 struct swsusp_extent *ext;
204
205                 ext = rb_entry(node, struct swsusp_extent, node);
206                 rb_erase(node, &swsusp_extents);
207                 swap_free_nr(swp_entry(swap, ext->start),
208                              ext->end - ext->start + 1);
209
210                 kfree(ext);
211         }
212 }
213
214 int swsusp_swap_in_use(void)
215 {
216         return (swsusp_extents.rb_node != NULL);
217 }
218
219 /*
220  * General things
221  */
222
223 static unsigned short root_swap = 0xffff;
224 static struct file *hib_resume_bdev_file;
225
226 struct hib_bio_batch {
227         atomic_t                count;
228         wait_queue_head_t       wait;
229         blk_status_t            error;
230         struct blk_plug         plug;
231 };
232
233 static void hib_init_batch(struct hib_bio_batch *hb)
234 {
235         atomic_set(&hb->count, 0);
236         init_waitqueue_head(&hb->wait);
237         hb->error = BLK_STS_OK;
238         blk_start_plug(&hb->plug);
239 }
240
241 static void hib_finish_batch(struct hib_bio_batch *hb)
242 {
243         blk_finish_plug(&hb->plug);
244 }
245
246 static void hib_end_io(struct bio *bio)
247 {
248         struct hib_bio_batch *hb = bio->bi_private;
249         struct page *page = bio_first_page_all(bio);
250
251         if (bio->bi_status) {
252                 pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
253                          MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
254                          (unsigned long long)bio->bi_iter.bi_sector);
255         }
256
257         if (bio_data_dir(bio) == WRITE)
258                 put_page(page);
259         else if (clean_pages_on_read)
260                 flush_icache_range((unsigned long)page_address(page),
261                                    (unsigned long)page_address(page) + PAGE_SIZE);
262
263         if (bio->bi_status && !hb->error)
264                 hb->error = bio->bi_status;
265         if (atomic_dec_and_test(&hb->count))
266                 wake_up(&hb->wait);
267
268         bio_put(bio);
269 }
270
271 static int hib_submit_io_sync(blk_opf_t opf, pgoff_t page_off, void *addr)
272 {
273         return bdev_rw_virt(file_bdev(hib_resume_bdev_file),
274                         page_off * (PAGE_SIZE >> 9), addr, PAGE_SIZE, opf);
275 }
276
277 static int hib_submit_io_async(blk_opf_t opf, pgoff_t page_off, void *addr,
278                          struct hib_bio_batch *hb)
279 {
280         struct bio *bio;
281
282         bio = bio_alloc(file_bdev(hib_resume_bdev_file), 1, opf,
283                         GFP_NOIO | __GFP_HIGH);
284         bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
285         bio_add_virt_nofail(bio, addr, PAGE_SIZE);
286         bio->bi_end_io = hib_end_io;
287         bio->bi_private = hb;
288         atomic_inc(&hb->count);
289         submit_bio(bio);
290         return 0;
291 }
292
293 static int hib_wait_io(struct hib_bio_batch *hb)
294 {
295         /*
296          * We are relying on the behavior of blk_plug that a thread with
297          * a plug will flush the plug list before sleeping.
298          */
299         wait_event(hb->wait, atomic_read(&hb->count) == 0);
300         return blk_status_to_errno(hb->error);
301 }
302
303 /*
304  * Saving part
305  */
306 static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
307 {
308         int error;
309
310         hib_submit_io_sync(REQ_OP_READ, swsusp_resume_block, swsusp_header);
311         if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
312             !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
313                 memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
314                 memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
315                 swsusp_header->image = handle->first_sector;
316                 if (swsusp_hardware_signature) {
317                         swsusp_header->hw_sig = swsusp_hardware_signature;
318                         flags |= SF_HW_SIG;
319                 }
320                 swsusp_header->flags = flags;
321                 if (flags & SF_CRC32_MODE)
322                         swsusp_header->crc32 = handle->crc32;
323                 error = hib_submit_io_sync(REQ_OP_WRITE | REQ_SYNC,
324                                       swsusp_resume_block, swsusp_header);
325         } else {
326                 pr_err("Swap header not found!\n");
327                 error = -ENODEV;
328         }
329         return error;
330 }
331
332 /*
333  * Holds the swsusp_header flags. These are used in software_resume() in
334  * kernel/power/hibernate.c to check whether the image is compressed and,
335  * if so, to query support for the compression algorithm.
336  */
337 unsigned int swsusp_header_flags;
338
339 /**
340  *      swsusp_swap_check - check if the resume device is a swap device
341  *      and get its index (if so)
342  *
343  *      This is called before saving the image.
344  */
345 static int swsusp_swap_check(void)
346 {
347         int res;
348
349         if (swsusp_resume_device)
350                 res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
351         else
352                 res = find_first_swap(&swsusp_resume_device);
353         if (res < 0)
354                 return res;
355         root_swap = res;
356
357         hib_resume_bdev_file = bdev_file_open_by_dev(swsusp_resume_device,
358                         BLK_OPEN_WRITE, NULL, NULL);
359         if (IS_ERR(hib_resume_bdev_file))
360                 return PTR_ERR(hib_resume_bdev_file);
361
362         return 0;
363 }
364
365 /**
366  *      write_page - Write one page to given swap location.
367  *      @buf:           Address we're writing.
368  *      @offset:        Offset of the swap page we're writing to.
369  *      @hb:            bio completion batch
370  */
371
372 static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
373 {
374         gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
375         void *src;
376         int ret;
377
378         if (!offset)
379                 return -ENOSPC;
380
381         if (!hb)
382                 goto sync_io;
383
384         src = (void *)__get_free_page(gfp);
385         if (!src) {
386                 ret = hib_wait_io(hb); /* Free pages */
387                 if (ret)
388                         return ret;
389                 src = (void *)__get_free_page(gfp);
390                 if (WARN_ON_ONCE(!src))
391                         goto sync_io;
392         }
393
394         copy_page(src, buf);
395         return hib_submit_io_async(REQ_OP_WRITE | REQ_SYNC, offset, src, hb);
396 sync_io:
397         return hib_submit_io_sync(REQ_OP_WRITE | REQ_SYNC, offset, buf);
398 }
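
/*
 * The asynchronous path above copies @buf into a freshly allocated bounce
 * page because callers reuse their buffers before the I/O completes (e.g.
 * swap_write_page() clears handle->cur right after queueing it); hib_end_io()
 * releases the bounce page when the write finishes.  If no page can be
 * allocated even after draining in-flight I/O, we fall back to a synchronous
 * write of @buf itself.
 */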
399
400 static void release_swap_writer(struct swap_map_handle *handle)
401 {
402         if (handle->cur)
403                 free_page((unsigned long)handle->cur);
404         handle->cur = NULL;
405 }
406
407 static int get_swap_writer(struct swap_map_handle *handle)
408 {
409         int ret;
410
411         ret = swsusp_swap_check();
412         if (ret) {
413                 if (ret != -ENOSPC)
414                         pr_err("Cannot find swap device, try swapon -a\n");
415                 return ret;
416         }
417         handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
418         if (!handle->cur) {
419                 ret = -ENOMEM;
420                 goto err_close;
421         }
422         handle->cur_swap = alloc_swapdev_block(root_swap);
423         if (!handle->cur_swap) {
424                 ret = -ENOSPC;
425                 goto err_rel;
426         }
427         handle->k = 0;
428         handle->reqd_free_pages = reqd_free_pages();
429         handle->first_sector = handle->cur_swap;
430         return 0;
431 err_rel:
432         release_swap_writer(handle);
433 err_close:
434         swsusp_close();
435         return ret;
436 }
437
438 static int swap_write_page(struct swap_map_handle *handle, void *buf,
439                 struct hib_bio_batch *hb)
440 {
441         int error;
442         sector_t offset;
443
444         if (!handle->cur)
445                 return -EINVAL;
446         offset = alloc_swapdev_block(root_swap);
447         error = write_page(buf, offset, hb);
448         if (error)
449                 return error;
450         handle->cur->entries[handle->k++] = offset;
451         if (handle->k >= MAP_PAGE_ENTRIES) {
452                 offset = alloc_swapdev_block(root_swap);
453                 if (!offset)
454                         return -ENOSPC;
455                 handle->cur->next_swap = offset;
456                 error = write_page(handle->cur, handle->cur_swap, hb);
457                 if (error)
458                         goto out;
459                 clear_page(handle->cur);
460                 handle->cur_swap = offset;
461                 handle->k = 0;
462
463                 if (hb && low_free_pages() <= handle->reqd_free_pages) {
464                         error = hib_wait_io(hb);
465                         if (error)
466                                 goto out;
467                         /*
468                          * Recalculate the number of required free pages, to
469                          * make sure we never take more than half.
470                          */
471                         handle->reqd_free_pages = reqd_free_pages();
472                 }
473         }
474  out:
475         return error;
476 }
477
478 static int flush_swap_writer(struct swap_map_handle *handle)
479 {
480         if (handle->cur && handle->cur_swap)
481                 return write_page(handle->cur, handle->cur_swap, NULL);
482         else
483                 return -EINVAL;
484 }
485
486 static int swap_writer_finish(struct swap_map_handle *handle,
487                 unsigned int flags, int error)
488 {
489         if (!error) {
490                 pr_info("S");
491                 error = mark_swapfiles(handle, flags);
492                 pr_cont("|\n");
493                 flush_swap_writer(handle);
494         }
495
496         if (error)
497                 free_all_swap_pages(root_swap);
498         release_swap_writer(handle);
499         swsusp_close();
500
501         return error;
502 }
503
504 /*
505  * Bytes we need for compressed data in the worst case. We assume (as a
506  * limitation) that this covers the worst case of all the compression algorithms.
507  */
508 #define bytes_worst_compress(x) ((x) + ((x) / 16) + 64 + 3 + 2)
509
510 /* We need to remember how much compressed data we need to read. */
511 #define CMP_HEADER      sizeof(size_t)
512
513 /* Number of pages/bytes we'll compress at one time. */
514 #define UNC_PAGES       32
515 #define UNC_SIZE        (UNC_PAGES * PAGE_SIZE)
516
517 /* Number of pages we need for compressed data (worst case). */
518 #define CMP_PAGES       DIV_ROUND_UP(bytes_worst_compress(UNC_SIZE) + \
519                                 CMP_HEADER, PAGE_SIZE)
520 #define CMP_SIZE        (CMP_PAGES * PAGE_SIZE)
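
/*
 * Rough sizing example, assuming 4 KiB pages and an 8-byte size_t (both are
 * configuration dependent): UNC_SIZE is 32 * 4096 = 131072 bytes, the worst
 * case compressed size is 131072 + 8192 + 64 + 3 + 2 = 139333 bytes, and
 * together with CMP_HEADER this rounds up to CMP_PAGES = 35 pages per chunk.
 */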
521
522 /* Maximum number of threads for compression/decompression. */
523 #define CMP_THREADS     3
524
525 /* Minimum/maximum number of pages for read buffering. */
526 #define CMP_MIN_RD_PAGES        1024
527 #define CMP_MAX_RD_PAGES        8192
528
529 /**
530  *      save_image - save the suspend image data
531  */
532
533 static int save_image(struct swap_map_handle *handle,
534                       struct snapshot_handle *snapshot,
535                       unsigned int nr_to_write)
536 {
537         unsigned int m;
538         int ret;
539         int nr_pages;
540         int err2;
541         struct hib_bio_batch hb;
542         ktime_t start;
543         ktime_t stop;
544
545         hib_init_batch(&hb);
546
547         pr_info("Saving image data pages (%u pages)...\n",
548                 nr_to_write);
549         m = nr_to_write / 10;
550         if (!m)
551                 m = 1;
552         nr_pages = 0;
553         start = ktime_get();
554         while (1) {
555                 ret = snapshot_read_next(snapshot);
556                 if (ret <= 0)
557                         break;
558                 ret = swap_write_page(handle, data_of(*snapshot), &hb);
559                 if (ret)
560                         break;
561                 if (!(nr_pages % m))
562                         pr_info("Image saving progress: %3d%%\n",
563                                 nr_pages / m * 10);
564                 nr_pages++;
565         }
566         err2 = hib_wait_io(&hb);
567         hib_finish_batch(&hb);
568         stop = ktime_get();
569         if (!ret)
570                 ret = err2;
571         if (!ret)
572                 pr_info("Image saving done\n");
573         swsusp_show_speed(start, stop, nr_to_write, "Wrote");
574         return ret;
575 }
576
577 /*
578  * Structure used for CRC32.
579  */
580 struct crc_data {
581         struct task_struct *thr;                  /* thread */
582         atomic_t ready;                           /* ready to start flag */
583         atomic_t stop;                            /* ready to stop flag */
584         unsigned run_threads;                     /* nr current threads */
585         wait_queue_head_t go;                     /* start crc update */
586         wait_queue_head_t done;                   /* crc update done */
587         u32 *crc32;                               /* points to handle's crc32 */
588         size_t *unc_len[CMP_THREADS];             /* uncompressed lengths */
589         unsigned char *unc[CMP_THREADS];          /* uncompressed data */
590 };
591
592 /*
593  * CRC32 update function that runs in its own thread.
594  */
595 static int crc32_threadfn(void *data)
596 {
597         struct crc_data *d = data;
598         unsigned i;
599
600         while (1) {
601                 wait_event(d->go, atomic_read_acquire(&d->ready) ||
602                                   kthread_should_stop());
603                 if (kthread_should_stop()) {
604                         d->thr = NULL;
605                         atomic_set_release(&d->stop, 1);
606                         wake_up(&d->done);
607                         break;
608                 }
609                 atomic_set(&d->ready, 0);
610
611                 for (i = 0; i < d->run_threads; i++)
612                         *d->crc32 = crc32_le(*d->crc32,
613                                              d->unc[i], *d->unc_len[i]);
614                 atomic_set_release(&d->stop, 1);
615                 wake_up(&d->done);
616         }
617         return 0;
618 }
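
/*
 * The ready/stop flags above implement a simple handshake: the producer fills
 * the shared buffers, sets 'ready' with release semantics and wakes the
 * thread; the thread observes it with acquire semantics, so the buffer
 * contents are guaranteed to be visible, and reports completion the same way
 * through 'stop' and the 'done' wait queue.  compress_threadfn() and
 * decompress_threadfn() below use the same scheme.
 */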
619 /*
620  * Structure used for data compression.
621  */
622 struct cmp_data {
623         struct task_struct *thr;                  /* thread */
624         struct crypto_acomp *cc;                  /* crypto compressor */
625         struct acomp_req *cr;                     /* crypto request */
626         atomic_t ready;                           /* ready to start flag */
627         atomic_t stop;                            /* ready to stop flag */
628         int ret;                                  /* return code */
629         wait_queue_head_t go;                     /* start compression */
630         wait_queue_head_t done;                   /* compression done */
631         size_t unc_len;                           /* uncompressed length */
632         size_t cmp_len;                           /* compressed length */
633         unsigned char unc[UNC_SIZE];              /* uncompressed buffer */
634         unsigned char cmp[CMP_SIZE];              /* compressed buffer */
635 };
636
637 /* Indicates the image size after compression */
638 static atomic_t compressed_size = ATOMIC_INIT(0);
639
640 /*
641  * Compression function that runs in its own thread.
642  */
643 static int compress_threadfn(void *data)
644 {
645         struct cmp_data *d = data;
646
647         while (1) {
648                 wait_event(d->go, atomic_read_acquire(&d->ready) ||
649                                   kthread_should_stop());
650                 if (kthread_should_stop()) {
651                         d->thr = NULL;
652                         d->ret = -1;
653                         atomic_set_release(&d->stop, 1);
654                         wake_up(&d->done);
655                         break;
656                 }
657                 atomic_set(&d->ready, 0);
658
659                 acomp_request_set_callback(d->cr, CRYPTO_TFM_REQ_MAY_SLEEP,
660                                            NULL, NULL);
661                 acomp_request_set_src_nondma(d->cr, d->unc, d->unc_len);
662                 acomp_request_set_dst_nondma(d->cr, d->cmp + CMP_HEADER,
663                                              CMP_SIZE - CMP_HEADER);
664                 d->ret = crypto_acomp_compress(d->cr);
665                 d->cmp_len = d->cr->dlen;
666
667                 atomic_set(&compressed_size, atomic_read(&compressed_size) + d->cmp_len);
668                 atomic_set_release(&d->stop, 1);
669                 wake_up(&d->done);
670         }
671         return 0;
672 }
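
/*
 * On-disk framing: each compressed chunk is stored as a size_t length
 * (CMP_HEADER) immediately followed by the compressed bytes, and is written
 * out in whole pages.  Any padding in the last page is harmless, because the
 * stored length tells the reader where the data ends.
 */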
673
674 /**
675  * save_compressed_image - Save the suspend image data after compression.
676  * @handle: Swap map handle to use for saving the image.
677  * @snapshot: Image to read data from.
678  * @nr_to_write: Number of pages to save.
679  */
680 static int save_compressed_image(struct swap_map_handle *handle,
681                                  struct snapshot_handle *snapshot,
682                                  unsigned int nr_to_write)
683 {
684         unsigned int m;
685         int ret = 0;
686         int nr_pages;
687         int err2;
688         struct hib_bio_batch hb;
689         ktime_t start;
690         ktime_t stop;
691         size_t off;
692         unsigned thr, run_threads, nr_threads;
693         unsigned char *page = NULL;
694         struct cmp_data *data = NULL;
695         struct crc_data *crc = NULL;
696
697         hib_init_batch(&hb);
698
699         atomic_set(&compressed_size, 0);
700
701         /*
702          * We limit the number of compression threads to bound the memory
703          * footprint.
704          */
705         nr_threads = num_online_cpus() - 1;
706         nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);
707
708         page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
709         if (!page) {
710                 pr_err("Failed to allocate %s page\n", hib_comp_algo);
711                 ret = -ENOMEM;
712                 goto out_clean;
713         }
714
715         data = vzalloc(array_size(nr_threads, sizeof(*data)));
716         if (!data) {
717                 pr_err("Failed to allocate %s data\n", hib_comp_algo);
718                 ret = -ENOMEM;
719                 goto out_clean;
720         }
721
722         crc = kzalloc(sizeof(*crc), GFP_KERNEL);
723         if (!crc) {
724                 pr_err("Failed to allocate crc\n");
725                 ret = -ENOMEM;
726                 goto out_clean;
727         }
728
729         /*
730          * Start the compression threads.
731          */
732         for (thr = 0; thr < nr_threads; thr++) {
733                 init_waitqueue_head(&data[thr].go);
734                 init_waitqueue_head(&data[thr].done);
735
736                 data[thr].cc = crypto_alloc_acomp(hib_comp_algo, 0, CRYPTO_ALG_ASYNC);
737                 if (IS_ERR_OR_NULL(data[thr].cc)) {
738                         pr_err("Could not allocate comp stream %ld\n", PTR_ERR(data[thr].cc));
739                         ret = -EFAULT;
740                         goto out_clean;
741                 }
742
743                 data[thr].cr = acomp_request_alloc(data[thr].cc);
744                 if (!data[thr].cr) {
745                         pr_err("Could not allocate comp request\n");
746                         ret = -ENOMEM;
747                         goto out_clean;
748                 }
749
750                 data[thr].thr = kthread_run(compress_threadfn,
751                                             &data[thr],
752                                             "image_compress/%u", thr);
753                 if (IS_ERR(data[thr].thr)) {
754                         data[thr].thr = NULL;
755                         pr_err("Cannot start compression threads\n");
756                         ret = -ENOMEM;
757                         goto out_clean;
758                 }
759         }
760
761         /*
762          * Start the CRC32 thread.
763          */
764         init_waitqueue_head(&crc->go);
765         init_waitqueue_head(&crc->done);
766
767         handle->crc32 = 0;
768         crc->crc32 = &handle->crc32;
769         for (thr = 0; thr < nr_threads; thr++) {
770                 crc->unc[thr] = data[thr].unc;
771                 crc->unc_len[thr] = &data[thr].unc_len;
772         }
773
774         crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
775         if (IS_ERR(crc->thr)) {
776                 crc->thr = NULL;
777                 pr_err("Cannot start CRC32 thread\n");
778                 ret = -ENOMEM;
779                 goto out_clean;
780         }
781
782         /*
783          * Adjust the number of required free pages after all allocations have
784          * been done. We don't want to run out of pages when writing.
785          */
786         handle->reqd_free_pages = reqd_free_pages();
787
788         pr_info("Using %u thread(s) for %s compression\n", nr_threads, hib_comp_algo);
789         pr_info("Compressing and saving image data (%u pages)...\n",
790                 nr_to_write);
791         m = nr_to_write / 10;
792         if (!m)
793                 m = 1;
794         nr_pages = 0;
795         start = ktime_get();
796         for (;;) {
797                 for (thr = 0; thr < nr_threads; thr++) {
798                         for (off = 0; off < UNC_SIZE; off += PAGE_SIZE) {
799                                 ret = snapshot_read_next(snapshot);
800                                 if (ret < 0)
801                                         goto out_finish;
802
803                                 if (!ret)
804                                         break;
805
806                                 memcpy(data[thr].unc + off,
807                                        data_of(*snapshot), PAGE_SIZE);
808
809                                 if (!(nr_pages % m))
810                                         pr_info("Image saving progress: %3d%%\n",
811                                                 nr_pages / m * 10);
812                                 nr_pages++;
813                         }
814                         if (!off)
815                                 break;
816
817                         data[thr].unc_len = off;
818
819                         atomic_set_release(&data[thr].ready, 1);
820                         wake_up(&data[thr].go);
821                 }
822
823                 if (!thr)
824                         break;
825
826                 crc->run_threads = thr;
827                 atomic_set_release(&crc->ready, 1);
828                 wake_up(&crc->go);
829
830                 for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
831                         wait_event(data[thr].done,
832                                 atomic_read_acquire(&data[thr].stop));
833                         atomic_set(&data[thr].stop, 0);
834
835                         ret = data[thr].ret;
836
837                         if (ret < 0) {
838                                 pr_err("%s compression failed\n", hib_comp_algo);
839                                 goto out_finish;
840                         }
841
842                         if (unlikely(!data[thr].cmp_len ||
843                                      data[thr].cmp_len >
844                                      bytes_worst_compress(data[thr].unc_len))) {
845                                 pr_err("Invalid %s compressed length\n", hib_comp_algo);
846                                 ret = -1;
847                                 goto out_finish;
848                         }
849
850                         *(size_t *)data[thr].cmp = data[thr].cmp_len;
851
852                         /*
853                          * Given we are writing one page at a time to disk, we
854                          * copy that much from the buffer, although the last
855                          * bit will likely be smaller than full page. This is
856                          * OK - we saved the length of the compressed data, so
857                          * any garbage at the end will be discarded when we
858                          * read it.
859                          */
860                         for (off = 0;
861                              off < CMP_HEADER + data[thr].cmp_len;
862                              off += PAGE_SIZE) {
863                                 memcpy(page, data[thr].cmp + off, PAGE_SIZE);
864
865                                 ret = swap_write_page(handle, page, &hb);
866                                 if (ret)
867                                         goto out_finish;
868                         }
869                 }
870
871                 wait_event(crc->done, atomic_read_acquire(&crc->stop));
872                 atomic_set(&crc->stop, 0);
873         }
874
875 out_finish:
876         err2 = hib_wait_io(&hb);
877         stop = ktime_get();
878         if (!ret)
879                 ret = err2;
880         if (!ret)
881                 pr_info("Image saving done\n");
882         swsusp_show_speed(start, stop, nr_to_write, "Wrote");
883         pr_info("Image size after compression: %d kbytes\n",
884                 (atomic_read(&compressed_size) / 1024));
885
886 out_clean:
887         hib_finish_batch(&hb);
888         if (crc) {
889                 if (crc->thr)
890                         kthread_stop(crc->thr);
891                 kfree(crc);
892         }
893         if (data) {
894                 for (thr = 0; thr < nr_threads; thr++) {
895                         if (data[thr].thr)
896                                 kthread_stop(data[thr].thr);
897                         acomp_request_free(data[thr].cr);
898                         crypto_free_acomp(data[thr].cc);
899                 }
900                 vfree(data);
901         }
902         if (page) free_page((unsigned long)page);
903
904         return ret;
905 }
906
907 /**
908  *      enough_swap - Make sure we have enough swap to save the image.
909  *
910  *      Returns TRUE or FALSE after checking the total amount of swap
911  *      space available from the resume partition.
912  */
913
914 static int enough_swap(unsigned int nr_pages)
915 {
916         unsigned int free_swap = count_swap_pages(root_swap, 1);
917         unsigned int required;
918
919         pr_debug("Free swap pages: %u\n", free_swap);
920
921         required = PAGES_FOR_IO + nr_pages;
922         return free_swap > required;
923 }
924
925 /**
926  *      swsusp_write - Write entire image and metadata.
927  *      @flags: flags to pass to the "boot" kernel in the image header
928  *
929  *      It is important _NOT_ to unmount filesystems at this point. We want
930  *      them synced (in case something goes wrong) but we do NOT want to mark
931  *      the filesystems clean: they are not. (And it does not matter; if we
932  *      resume correctly, we'll mark the system clean anyway.)
933  */
934
935 int swsusp_write(unsigned int flags)
936 {
937         struct swap_map_handle handle;
938         struct snapshot_handle snapshot;
939         struct swsusp_info *header;
940         unsigned long pages;
941         int error;
942
943         pages = snapshot_get_image_size();
944         error = get_swap_writer(&handle);
945         if (error) {
946                 pr_err("Cannot get swap writer\n");
947                 return error;
948         }
949         if (flags & SF_NOCOMPRESS_MODE) {
950                 if (!enough_swap(pages)) {
951                         pr_err("Not enough free swap\n");
952                         error = -ENOSPC;
953                         goto out_finish;
954                 }
955         }
956         memset(&snapshot, 0, sizeof(struct snapshot_handle));
957         error = snapshot_read_next(&snapshot);
958         if (error < (int)PAGE_SIZE) {
959                 if (error >= 0)
960                         error = -EFAULT;
961
962                 goto out_finish;
963         }
964         header = (struct swsusp_info *)data_of(snapshot);
965         error = swap_write_page(&handle, header, NULL);
966         if (!error) {
967                 error = (flags & SF_NOCOMPRESS_MODE) ?
968                         save_image(&handle, &snapshot, pages - 1) :
969                         save_compressed_image(&handle, &snapshot, pages - 1);
970         }
971 out_finish:
972         error = swap_writer_finish(&handle, flags, error);
973         return error;
974 }
975
976 /*
977  *      The following functions allow us to read data using a swap map
978  *      in a file-like way.
979  */
980
981 static void release_swap_reader(struct swap_map_handle *handle)
982 {
983         struct swap_map_page_list *tmp;
984
985         while (handle->maps) {
986                 if (handle->maps->map)
987                         free_page((unsigned long)handle->maps->map);
988                 tmp = handle->maps;
989                 handle->maps = handle->maps->next;
990                 kfree(tmp);
991         }
992         handle->cur = NULL;
993 }
994
995 static int get_swap_reader(struct swap_map_handle *handle,
996                 unsigned int *flags_p)
997 {
998         int error;
999         struct swap_map_page_list *tmp, *last;
1000         sector_t offset;
1001
1002         *flags_p = swsusp_header->flags;
1003
1004         if (!swsusp_header->image) /* how can this happen? */
1005                 return -EINVAL;
1006
1007         handle->cur = NULL;
1008         last = handle->maps = NULL;
1009         offset = swsusp_header->image;
1010         while (offset) {
1011                 tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);
1012                 if (!tmp) {
1013                         release_swap_reader(handle);
1014                         return -ENOMEM;
1015                 }
1016                 if (!handle->maps)
1017                         handle->maps = tmp;
1018                 if (last)
1019                         last->next = tmp;
1020                 last = tmp;
1021
1022                 tmp->map = (struct swap_map_page *)
1023                            __get_free_page(GFP_NOIO | __GFP_HIGH);
1024                 if (!tmp->map) {
1025                         release_swap_reader(handle);
1026                         return -ENOMEM;
1027                 }
1028
1029                 error = hib_submit_io_sync(REQ_OP_READ, offset, tmp->map);
1030                 if (error) {
1031                         release_swap_reader(handle);
1032                         return error;
1033                 }
1034                 offset = tmp->map->next_swap;
1035         }
1036         handle->k = 0;
1037         handle->cur = handle->maps->map;
1038         return 0;
1039 }
1040
1041 static int swap_read_page(struct swap_map_handle *handle, void *buf,
1042                 struct hib_bio_batch *hb)
1043 {
1044         sector_t offset;
1045         int error;
1046         struct swap_map_page_list *tmp;
1047
1048         if (!handle->cur)
1049                 return -EINVAL;
1050         offset = handle->cur->entries[handle->k];
1051         if (!offset)
1052                 return -EFAULT;
1053         if (hb)
1054                 error = hib_submit_io_async(REQ_OP_READ, offset, buf, hb);
1055         else
1056                 error = hib_submit_io_sync(REQ_OP_READ, offset, buf);
1057         if (error)
1058                 return error;
1059         if (++handle->k >= MAP_PAGE_ENTRIES) {
1060                 handle->k = 0;
1061                 free_page((unsigned long)handle->maps->map);
1062                 tmp = handle->maps;
1063                 handle->maps = handle->maps->next;
1064                 kfree(tmp);
1065                 if (!handle->maps)
1066                         release_swap_reader(handle);
1067                 else
1068                         handle->cur = handle->maps->map;
1069         }
1070         return error;
1071 }
1072
1073 static int swap_reader_finish(struct swap_map_handle *handle)
1074 {
1075         release_swap_reader(handle);
1076
1077         return 0;
1078 }
1079
1080 /**
1081  *      load_image - load the image using the swap map handle
1082  *      @handle and the snapshot handle @snapshot
1083  *      (assume there are @nr_to_read pages to load)
1084  */
1085
1086 static int load_image(struct swap_map_handle *handle,
1087                       struct snapshot_handle *snapshot,
1088                       unsigned int nr_to_read)
1089 {
1090         unsigned int m;
1091         int ret = 0;
1092         ktime_t start;
1093         ktime_t stop;
1094         struct hib_bio_batch hb;
1095         int err2;
1096         unsigned nr_pages;
1097
1098         hib_init_batch(&hb);
1099
1100         clean_pages_on_read = true;
1101         pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
1102         m = nr_to_read / 10;
1103         if (!m)
1104                 m = 1;
1105         nr_pages = 0;
1106         start = ktime_get();
1107         for ( ; ; ) {
1108                 ret = snapshot_write_next(snapshot);
1109                 if (ret <= 0)
1110                         break;
1111                 ret = swap_read_page(handle, data_of(*snapshot), &hb);
1112                 if (ret)
1113                         break;
1114                 if (snapshot->sync_read)
1115                         ret = hib_wait_io(&hb);
1116                 if (ret)
1117                         break;
1118                 if (!(nr_pages % m))
1119                         pr_info("Image loading progress: %3d%%\n",
1120                                 nr_pages / m * 10);
1121                 nr_pages++;
1122         }
1123         err2 = hib_wait_io(&hb);
1124         hib_finish_batch(&hb);
1125         stop = ktime_get();
1126         if (!ret)
1127                 ret = err2;
1128         if (!ret) {
1129                 pr_info("Image loading done\n");
1130                 ret = snapshot_write_finalize(snapshot);
1131                 if (!ret && !snapshot_image_loaded(snapshot))
1132                         ret = -ENODATA;
1133         }
1134         swsusp_show_speed(start, stop, nr_to_read, "Read");
1135         return ret;
1136 }
1137
1138 /*
1139  * Structure used for data decompression.
1140  */
1141 struct dec_data {
1142         struct task_struct *thr;                  /* thread */
1143         struct crypto_acomp *cc;                  /* crypto compressor */
1144         struct acomp_req *cr;                     /* crypto request */
1145         atomic_t ready;                           /* ready to start flag */
1146         atomic_t stop;                            /* ready to stop flag */
1147         int ret;                                  /* return code */
1148         wait_queue_head_t go;                     /* start decompression */
1149         wait_queue_head_t done;                   /* decompression done */
1150         size_t unc_len;                           /* uncompressed length */
1151         size_t cmp_len;                           /* compressed length */
1152         unsigned char unc[UNC_SIZE];              /* uncompressed buffer */
1153         unsigned char cmp[CMP_SIZE];              /* compressed buffer */
1154 };
1155
1156 /*
1157  * Decompression function that runs in its own thread.
1158  */
1159 static int decompress_threadfn(void *data)
1160 {
1161         struct dec_data *d = data;
1162
1163         while (1) {
1164                 wait_event(d->go, atomic_read_acquire(&d->ready) ||
1165                                   kthread_should_stop());
1166                 if (kthread_should_stop()) {
1167                         d->thr = NULL;
1168                         d->ret = -1;
1169                         atomic_set_release(&d->stop, 1);
1170                         wake_up(&d->done);
1171                         break;
1172                 }
1173                 atomic_set(&d->ready, 0);
1174
1175                 acomp_request_set_callback(d->cr, CRYPTO_TFM_REQ_MAY_SLEEP,
1176                                            NULL, NULL);
1177                 acomp_request_set_src_nondma(d->cr, d->cmp + CMP_HEADER,
1178                                              d->cmp_len);
1179                 acomp_request_set_dst_nondma(d->cr, d->unc, UNC_SIZE);
1180                 d->ret = crypto_acomp_decompress(d->cr);
1181                 d->unc_len = d->cr->dlen;
1182
1183                 if (clean_pages_on_decompress)
1184                         flush_icache_range((unsigned long)d->unc,
1185                                            (unsigned long)d->unc + d->unc_len);
1186
1187                 atomic_set_release(&d->stop, 1);
1188                 wake_up(&d->done);
1189         }
1190         return 0;
1191 }
1192
1193 /**
1194  * load_compressed_image - Load compressed image data and decompress it.
1195  * @handle: Swap map handle to use for loading data.
1196  * @snapshot: Image to copy uncompressed data into.
1197  * @nr_to_read: Number of pages to load.
1198  */
1199 static int load_compressed_image(struct swap_map_handle *handle,
1200                                  struct snapshot_handle *snapshot,
1201                                  unsigned int nr_to_read)
1202 {
1203         unsigned int m;
1204         int ret = 0;
1205         int eof = 0;
1206         struct hib_bio_batch hb;
1207         ktime_t start;
1208         ktime_t stop;
1209         unsigned nr_pages;
1210         size_t off;
1211         unsigned i, thr, run_threads, nr_threads;
1212         unsigned ring = 0, pg = 0, ring_size = 0,
1213                  have = 0, want, need, asked = 0;
1214         unsigned long read_pages = 0;
1215         unsigned char **page = NULL;
1216         struct dec_data *data = NULL;
1217         struct crc_data *crc = NULL;
1218
1219         hib_init_batch(&hb);
1220
1221         /*
1222          * We limit the number of decompression threads to bound the memory
1223          * footprint.
1224          */
1225         nr_threads = num_online_cpus() - 1;
1226         nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);
1227
1228         page = vmalloc(array_size(CMP_MAX_RD_PAGES, sizeof(*page)));
1229         if (!page) {
1230                 pr_err("Failed to allocate %s page\n", hib_comp_algo);
1231                 ret = -ENOMEM;
1232                 goto out_clean;
1233         }
1234
1235         data = vzalloc(array_size(nr_threads, sizeof(*data)));
1236         if (!data) {
1237                 pr_err("Failed to allocate %s data\n", hib_comp_algo);
1238                 ret = -ENOMEM;
1239                 goto out_clean;
1240         }
1241
1242         crc = kzalloc(sizeof(*crc), GFP_KERNEL);
1243         if (!crc) {
1244                 pr_err("Failed to allocate crc\n");
1245                 ret = -ENOMEM;
1246                 goto out_clean;
1247         }
1248
1249         clean_pages_on_decompress = true;
1250
1251         /*
1252          * Start the decompression threads.
1253          */
1254         for (thr = 0; thr < nr_threads; thr++) {
1255                 init_waitqueue_head(&data[thr].go);
1256                 init_waitqueue_head(&data[thr].done);
1257
1258                 data[thr].cc = crypto_alloc_acomp(hib_comp_algo, 0, CRYPTO_ALG_ASYNC);
1259                 if (IS_ERR_OR_NULL(data[thr].cc)) {
1260                         pr_err("Could not allocate comp stream %ld\n", PTR_ERR(data[thr].cc));
1261                         ret = -EFAULT;
1262                         goto out_clean;
1263                 }
1264
1265                 data[thr].cr = acomp_request_alloc(data[thr].cc);
1266                 if (!data[thr].cr) {
1267                         pr_err("Could not allocate comp request\n");
1268                         ret = -ENOMEM;
1269                         goto out_clean;
1270                 }
1271
1272                 data[thr].thr = kthread_run(decompress_threadfn,
1273                                             &data[thr],
1274                                             "image_decompress/%u", thr);
1275                 if (IS_ERR(data[thr].thr)) {
1276                         data[thr].thr = NULL;
1277                         pr_err("Cannot start decompression threads\n");
1278                         ret = -ENOMEM;
1279                         goto out_clean;
1280                 }
1281         }
1282
1283         /*
1284          * Start the CRC32 thread.
1285          */
1286         init_waitqueue_head(&crc->go);
1287         init_waitqueue_head(&crc->done);
1288
1289         handle->crc32 = 0;
1290         crc->crc32 = &handle->crc32;
1291         for (thr = 0; thr < nr_threads; thr++) {
1292                 crc->unc[thr] = data[thr].unc;
1293                 crc->unc_len[thr] = &data[thr].unc_len;
1294         }
1295
1296         crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
1297         if (IS_ERR(crc->thr)) {
1298                 crc->thr = NULL;
1299                 pr_err("Cannot start CRC32 thread\n");
1300                 ret = -ENOMEM;
1301                 goto out_clean;
1302         }
1303
1304         /*
1305          * Set the number of pages for read buffering.
1306          * This is complete guesswork, because we'll only know the real
1307          * picture once prepare_image() is called, which is much later on
1308          * during the image load phase. We'll assume the worst case and
1309          * say that none of the image pages are from high memory.
1310          */
1311         if (low_free_pages() > snapshot_get_image_size())
1312                 read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
1313         read_pages = clamp_val(read_pages, CMP_MIN_RD_PAGES, CMP_MAX_RD_PAGES);
1314
1315         for (i = 0; i < read_pages; i++) {
1316                 page[i] = (void *)__get_free_page(i < CMP_PAGES ?
1317                                                   GFP_NOIO | __GFP_HIGH :
1318                                                   GFP_NOIO | __GFP_NOWARN |
1319                                                   __GFP_NORETRY);
1320
1321                 if (!page[i]) {
1322                         if (i < CMP_PAGES) {
1323                                 ring_size = i;
1324                                 pr_err("Failed to allocate %s pages\n", hib_comp_algo);
1325                                 ret = -ENOMEM;
1326                                 goto out_clean;
1327                         } else {
1328                                 break;
1329                         }
1330                 }
1331         }
1332         want = ring_size = i;
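
        /*
         * page[] now works as a circular read-ahead buffer of ring_size
         * pages: 'ring' is the next slot a read is submitted into, 'pg' is
         * the next slot to be consumed, 'want' counts free slots, 'asked'
         * counts reads submitted but not yet waited for, and 'have' counts
         * pages whose reads have completed and are ready to be consumed.
         */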
1333
1334         pr_info("Using %u thread(s) for %s decompression\n", nr_threads, hib_comp_algo);
1335         pr_info("Loading and decompressing image data (%u pages)...\n",
1336                 nr_to_read);
1337         m = nr_to_read / 10;
1338         if (!m)
1339                 m = 1;
1340         nr_pages = 0;
1341         start = ktime_get();
1342
1343         ret = snapshot_write_next(snapshot);
1344         if (ret <= 0)
1345                 goto out_finish;
1346
1347         for (;;) {
1348                 for (i = 0; !eof && i < want; i++) {
1349                         ret = swap_read_page(handle, page[ring], &hb);
1350                         if (ret) {
1351                                 /*
1352                                  * On real read error, finish. On end of data,
1353                                  * set EOF flag and just exit the read loop.
1354                                  */
1355                                 if (handle->cur &&
1356                                     handle->cur->entries[handle->k]) {
1357                                         goto out_finish;
1358                                 } else {
1359                                         eof = 1;
1360                                         break;
1361                                 }
1362                         }
1363                         if (++ring >= ring_size)
1364                                 ring = 0;
1365                 }
1366                 asked += i;
1367                 want -= i;
1368
1369                 /*
1370                  * We are out of data, wait for some more.
1371                  */
1372                 if (!have) {
1373                         if (!asked)
1374                                 break;
1375
1376                         ret = hib_wait_io(&hb);
1377                         if (ret)
1378                                 goto out_finish;
1379                         have += asked;
1380                         asked = 0;
1381                         if (eof)
1382                                 eof = 2;
1383                 }
1384
1385                 if (crc->run_threads) {
1386                         wait_event(crc->done, atomic_read_acquire(&crc->stop));
1387                         atomic_set(&crc->stop, 0);
1388                         crc->run_threads = 0;
1389                 }
1390
1391                 for (thr = 0; have && thr < nr_threads; thr++) {
1392                         data[thr].cmp_len = *(size_t *)page[pg];
1393                         if (unlikely(!data[thr].cmp_len ||
1394                                      data[thr].cmp_len >
1395                                         bytes_worst_compress(UNC_SIZE))) {
1396                                 pr_err("Invalid %s compressed length\n", hib_comp_algo);
1397                                 ret = -1;
1398                                 goto out_finish;
1399                         }
1400
1401                         need = DIV_ROUND_UP(data[thr].cmp_len + CMP_HEADER,
1402                                             PAGE_SIZE);
1403                         if (need > have) {
1404                                 if (eof > 1) {
1405                                         ret = -1;
1406                                         goto out_finish;
1407                                 }
1408                                 break;
1409                         }
1410
1411                         for (off = 0;
1412                              off < CMP_HEADER + data[thr].cmp_len;
1413                              off += PAGE_SIZE) {
1414                                 memcpy(data[thr].cmp + off,
1415                                        page[pg], PAGE_SIZE);
1416                                 have--;
1417                                 want++;
1418                                 if (++pg >= ring_size)
1419                                         pg = 0;
1420                         }
1421
1422                         atomic_set_release(&data[thr].ready, 1);
1423                         wake_up(&data[thr].go);
1424                 }
1425
1426                 /*
1427                  * Wait for more data while we are decompressing.
1428                  */
1429                 if (have < CMP_PAGES && asked) {
1430                         ret = hib_wait_io(&hb);
1431                         if (ret)
1432                                 goto out_finish;
1433                         have += asked;
1434                         asked = 0;
1435                         if (eof)
1436                                 eof = 2;
1437                 }
1438
1439                 for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
1440                         wait_event(data[thr].done,
1441                                 atomic_read_acquire(&data[thr].stop));
1442                         atomic_set(&data[thr].stop, 0);
1443
1444                         ret = data[thr].ret;
1445
1446                         if (ret < 0) {
1447                                 pr_err("%s decompression failed\n", hib_comp_algo);
1448                                 goto out_finish;
1449                         }
1450
1451                         if (unlikely(!data[thr].unc_len ||
1452                                 data[thr].unc_len > UNC_SIZE ||
1453                                 data[thr].unc_len & (PAGE_SIZE - 1))) {
1454                                 pr_err("Invalid %s uncompressed length\n", hib_comp_algo);
1455                                 ret = -1;
1456                                 goto out_finish;
1457                         }
1458
1459                         for (off = 0;
1460                              off < data[thr].unc_len; off += PAGE_SIZE) {
1461                                 memcpy(data_of(*snapshot),
1462                                        data[thr].unc + off, PAGE_SIZE);
1463
1464                                 if (!(nr_pages % m))
1465                                         pr_info("Image loading progress: %3d%%\n",
1466                                                 nr_pages / m * 10);
1467                                 nr_pages++;
1468
1469                                 ret = snapshot_write_next(snapshot);
1470                                 if (ret <= 0) {
1471                                         crc->run_threads = thr + 1;
1472                                         atomic_set_release(&crc->ready, 1);
1473                                         wake_up(&crc->go);
1474                                         goto out_finish;
1475                                 }
1476                         }
1477                 }
1478
1479                 crc->run_threads = thr;
1480                 atomic_set_release(&crc->ready, 1);
1481                 wake_up(&crc->go);
1482         }
1483
1484 out_finish:
1485         if (crc->run_threads) {
1486                 wait_event(crc->done, atomic_read_acquire(&crc->stop));
1487                 atomic_set(&crc->stop, 0);
1488         }
1489         stop = ktime_get();
1490         if (!ret) {
1491                 pr_info("Image loading done\n");
1492                 ret = snapshot_write_finalize(snapshot);
1493                 if (!ret && !snapshot_image_loaded(snapshot))
1494                         ret = -ENODATA;
1495                 if (!ret) {
1496                         if (swsusp_header->flags & SF_CRC32_MODE) {
1497                                 if (handle->crc32 != swsusp_header->crc32) {
1498                                         pr_err("Invalid image CRC32!\n");
1499                                         ret = -ENODATA;
1500                                 }
1501                         }
1502                 }
1503         }
1504         swsusp_show_speed(start, stop, nr_to_read, "Read");
1505 out_clean:
1506         hib_finish_batch(&hb);
1507         for (i = 0; i < ring_size; i++)
1508                 free_page((unsigned long)page[i]);
1509         if (crc) {
1510                 if (crc->thr)
1511                         kthread_stop(crc->thr);
1512                 kfree(crc);
1513         }
1514         if (data) {
1515                 for (thr = 0; thr < nr_threads; thr++) {
1516                         if (data[thr].thr)
1517                                 kthread_stop(data[thr].thr);
1518                         acomp_request_free(data[thr].cr);
1519                         crypto_free_acomp(data[thr].cc);
1520                 }
1521                 vfree(data);
1522         }
1523         vfree(page);
1524
1525         return ret;
1526 }
1527
1528 /**
1529  *      swsusp_read - read the hibernation image.
1530  *      @flags_p: location into which the flags passed by the "frozen" kernel
1531  *                in the image header are to be written
1532  */
1533
1534 int swsusp_read(unsigned int *flags_p)
1535 {
1536         int error;
1537         struct swap_map_handle handle;
1538         struct snapshot_handle snapshot;
1539         struct swsusp_info *header;
1540
1541         memset(&snapshot, 0, sizeof(struct snapshot_handle));
1542         error = snapshot_write_next(&snapshot);
1543         if (error < (int)PAGE_SIZE)
1544                 return error < 0 ? error : -EFAULT;
1545         header = (struct swsusp_info *)data_of(snapshot);
1546         error = get_swap_reader(&handle, flags_p);
1547         if (error)
1548                 goto end;
1549         if (!error)
1550                 error = swap_read_page(&handle, header, NULL);
1551         if (!error) {
1552                 error = (*flags_p & SF_NOCOMPRESS_MODE) ?
1553                         load_image(&handle, &snapshot, header->pages - 1) :
1554                         load_compressed_image(&handle, &snapshot, header->pages - 1);
1555         }
1556         swap_reader_finish(&handle);
1557 end:
1558         if (!error)
1559                 pr_debug("Image successfully loaded\n");
1560         else
1561                 pr_debug("Error %d resuming\n", error);
1562         return error;
1563 }
1564
1565 static void *swsusp_holder;
1566
1567 /**
1568  * swsusp_check - Open the resume device and check for the swsusp signature.
1569  * @exclusive: Open the resume device exclusively.
1570  */
1571
1572 int swsusp_check(bool exclusive)
1573 {
1574         void *holder = exclusive ? &swsusp_holder : NULL;
1575         int error;
1576
1577         hib_resume_bdev_file = bdev_file_open_by_dev(swsusp_resume_device,
1578                                 BLK_OPEN_READ, holder, NULL);
1579         if (!IS_ERR(hib_resume_bdev_file)) {
1580                 clear_page(swsusp_header);
1581                 error = hib_submit_io_sync(REQ_OP_READ, swsusp_resume_block,
1582                                         swsusp_header);
1583                 if (error)
1584                         goto put;
1585
1586                 if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
1587                         memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
1588                         swsusp_header_flags = swsusp_header->flags;
1589                         /* Reset swap signature now */
1590                         error = hib_submit_io_sync(REQ_OP_WRITE | REQ_SYNC,
1591                                                 swsusp_resume_block,
1592                                                 swsusp_header);
1593                 } else {
1594                         error = -EINVAL;
1595                 }
1596                 if (!error && swsusp_header->flags & SF_HW_SIG &&
1597                     swsusp_header->hw_sig != swsusp_hardware_signature) {
1598                         pr_info("Suspend image hardware signature mismatch (%08x now %08x); aborting resume.\n",
1599                                 swsusp_header->hw_sig, swsusp_hardware_signature);
1600                         error = -EINVAL;
1601                 }
1602
1603 put:
1604                 if (error)
1605                         bdev_fput(hib_resume_bdev_file);
1606                 else
1607                         pr_debug("Image signature found, resuming\n");
1608         } else {
1609                 error = PTR_ERR(hib_resume_bdev_file);
1610         }
1611
1612         if (error)
1613                 pr_debug("Image not found (code %d)\n", error);
1614
1615         return error;
1616 }
1617
1618 /**
1619  * swsusp_close - close resume device.
1620  */
1621
1622 void swsusp_close(void)
1623 {
1624         if (IS_ERR(hib_resume_bdev_file)) {
1625                 pr_debug("Image device not initialised\n");
1626                 return;
1627         }
1628
1629         fput(hib_resume_bdev_file);
1630 }
1631
1632 /**
1633  *      swsusp_unmark - Unmark swsusp signature in the resume device
1634  */
1635
1636 #ifdef CONFIG_SUSPEND
1637 int swsusp_unmark(void)
1638 {
1639         int error;
1640
1641         hib_submit_io_sync(REQ_OP_READ, swsusp_resume_block, swsusp_header);
1642         if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
1643                 memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
1644                 error = hib_submit_io_sync(REQ_OP_WRITE | REQ_SYNC,
1645                                         swsusp_resume_block,
1646                                         swsusp_header);
1647         } else {
1648                 pr_err("Cannot find swsusp signature!\n");
1649                 error = -ENODEV;
1650         }
1651
1652         /*
1653          * We just returned from suspend, we don't need the image any more.
1654          */
1655         free_all_swap_pages(root_swap);
1656
1657         return error;
1658 }
1659 #endif
1660
1661 static int __init swsusp_header_init(void)
1662 {
1663         swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL);
1664         if (!swsusp_header)
1665                 panic("Could not allocate memory for swsusp_header\n");
1666         return 0;
1667 }
1668
1669 core_initcall(swsusp_header_init);