/*
 * linux/kernel/power/swap.c
 *
 * This file provides functions for reading the suspend image from
 * and writing it to a swap partition.
 *
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
 *
 * This file is released under the GPLv2.
 */

#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/genhd.h>
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/ktime.h>

#include "power.h"

#define HIBERNATE_SIG	"S1SUSPEND"

/*
 * When reading an {un,}compressed image, we may restore pages in place,
 * in which case some architectures need these pages cleaning before they
 * can be executed. We don't know which pages these may be, so clean the lot.
 */
static bool clean_pages_on_read;
static bool clean_pages_on_decompress;

/*
 * The swap map is a data structure used for keeping track of each page
 * written to a swap partition. It consists of many swap_map_page
 * structures, each of which contains an array of MAP_PAGE_ENTRIES swap
 * entries. These structures are stored on the swap and linked together
 * with the help of the .next_swap member.
 *
 * The swap map is created during suspend. The swap map pages are
 * allocated and populated one at a time, so we only need one memory
 * page to set up the entire structure.
 *
 * During resume we pick up all swap_map_page structures into a list.
 */

#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)

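/*
 * Worked example of the arithmetic above (illustrative, assuming 4 KiB
 * pages and a 64-bit sector_t): PAGE_SIZE / sizeof(sector_t) - 1 =
 * 4096 / 8 - 1 = 511, so each swap_map_page holds 511 data sectors,
 * and the one remaining slot in the page is the .next_swap link to the
 * following map page.
 */
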
/*
 * Number of free pages that are not high.
 */
static inline unsigned long low_free_pages(void)
{
	return nr_free_pages() - nr_free_highpages();
}

/*
 * Number of pages required to be kept free while writing the image. Always
 * half of all available low pages before the writing starts.
 */
static inline unsigned long reqd_free_pages(void)
{
	return low_free_pages() / 2;
}

struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];
	sector_t next_swap;
};

struct swap_map_page_list {
	struct swap_map_page *map;
	struct swap_map_page_list *next;
};

/*
 * The swap_map_handle structure is used for handling swap in
 * a file-alike way.
 */

struct swap_map_handle {
	struct swap_map_page *cur;
	struct swap_map_page_list *maps;
	sector_t cur_swap;
	sector_t first_sector;
	unsigned int k;
	unsigned long reqd_free_pages;
	u32 crc32;
};

struct swsusp_header {
	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
		      sizeof(u32)];
	u32	crc32;
	sector_t image;
	unsigned int flags;	/* Flags to pass to the "boot" kernel */
	char	orig_sig[10];
	char	sig[10];
} __packed;

static struct swsusp_header *swsusp_header;

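/*
 * Note on the layout above: the reserved[] padding sizes the structure
 * to exactly one page, so that sig[] lands in the last 10 bytes of the
 * first page of the swap area - the same spot where the swap signature
 * ("SWAP-SPACE" or "SWAPSPACE2") normally lives. Writing this structure
 * to swsusp_resume_block therefore replaces the swap signature with
 * HIBERNATE_SIG in place, while orig_sig[] preserves the original so
 * it can be restored on resume.
 */
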
/*
 * The following functions are used for tracing the allocated
 * swap pages, so that they can be freed in case of an error.
 */

struct swsusp_extent {
	struct rb_node node;
	unsigned long start;
	unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;

static int swsusp_extents_insert(unsigned long swap_offset)
{
	struct rb_node **new = &(swsusp_extents.rb_node);
	struct rb_node *parent = NULL;
	struct swsusp_extent *ext;

	/* Figure out where to put the new node */
	while (*new) {
		ext = rb_entry(*new, struct swsusp_extent, node);
		parent = *new;
		if (swap_offset < ext->start) {
			/* Try to merge */
			if (swap_offset == ext->start - 1) {
				ext->start--;
				return 0;
			}
			new = &((*new)->rb_left);
		} else if (swap_offset > ext->end) {
			/* Try to merge */
			if (swap_offset == ext->end + 1) {
				ext->end++;
				return 0;
			}
			new = &((*new)->rb_right);
		} else {
			/* It already is in the tree */
			return -EINVAL;
		}
	}
	/* Add the new node and rebalance the tree. */
	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
	if (!ext)
		return -ENOMEM;
	ext->start = swap_offset;
	ext->end = swap_offset;
	rb_link_node(&ext->node, parent, new);
	rb_insert_color(&ext->node, &swsusp_extents);
	return 0;
}

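/*
 * Illustration of the merging behaviour above: inserting offsets 10, 11
 * and 12 in turn leaves a single extent [10..12] in the tree (the second
 * and third inserts merely extend an adjacent extent), while inserting
 * 11 again would fall inside that extent and return -EINVAL.
 */
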
/*
 * alloc_swapdev_block - allocate a swap page and register that it has
 * been allocated, so that it can be freed in case of an error.
 */

sector_t alloc_swapdev_block(int swap)
{
	unsigned long offset;

	offset = swp_offset(get_swap_page_of_type(swap));
	if (offset) {
		if (swsusp_extents_insert(offset))
			swap_free(swp_entry(swap, offset));
		else
			return swapdev_block(swap, offset);
	}
	return 0;
}

/*
 * free_all_swap_pages - free swap pages allocated for saving image data.
 * It also frees the extents used to register which swap entries had been
 * allocated.
 */

void free_all_swap_pages(int swap)
{
	struct rb_node *node;

	while ((node = swsusp_extents.rb_node)) {
		struct swsusp_extent *ext;
		unsigned long offset;

		ext = container_of(node, struct swsusp_extent, node);
		rb_erase(node, &swsusp_extents);
		for (offset = ext->start; offset <= ext->end; offset++)
			swap_free(swp_entry(swap, offset));

		kfree(ext);
	}
}

int swsusp_swap_in_use(void)
{
	return (swsusp_extents.rb_node != NULL);
}

/*
 * General things
 */

static unsigned short root_swap = 0xffff;
static struct block_device *hib_resume_bdev;

struct hib_bio_batch {
	atomic_t		count;
	wait_queue_head_t	wait;
	int			error;
};

static void hib_init_batch(struct hib_bio_batch *hb)
{
	atomic_set(&hb->count, 0);
	init_waitqueue_head(&hb->wait);
	hb->error = 0;
}

static void hib_end_io(struct bio *bio)
{
	struct hib_bio_batch *hb = bio->bi_private;
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (bio->bi_error) {
		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_iter.bi_sector);
	}

	if (bio_data_dir(bio) == WRITE)
		put_page(page);
	else if (clean_pages_on_read)
		flush_icache_range((unsigned long)page_address(page),
				   (unsigned long)page_address(page) + PAGE_SIZE);

	if (bio->bi_error && !hb->error)
		hb->error = bio->bi_error;
	if (atomic_dec_and_test(&hb->count))
		wake_up(&hb->wait);

	bio_put(bio);
}

static int hib_submit_io(int rw, pgoff_t page_off, void *addr,
		struct hib_bio_batch *hb)
{
	struct page *page = virt_to_page(addr);
	struct bio *bio;
	int error = 0;

	bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1);
	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
	bio->bi_bdev = hib_resume_bdev;

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		printk(KERN_ERR "PM: Adding page to bio failed at %llu\n",
			(unsigned long long)bio->bi_iter.bi_sector);
		bio_put(bio);
		return -EFAULT;
	}

	if (hb) {
		bio->bi_end_io = hib_end_io;
		bio->bi_private = hb;
		atomic_inc(&hb->count);
		submit_bio(rw, bio);
	} else {
		error = submit_bio_wait(rw, bio);
		bio_put(bio);
	}

	return error;
}

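/*
 * The batch pattern above, summarised: each asynchronous bio bumps
 * hb->count before submission and hib_end_io() drops it on completion,
 * recording the first error seen and waking any waiter once the count
 * reaches zero. hib_wait_io() below is the consumer side - it sleeps
 * until every in-flight bio of the batch has completed and then reports
 * that first recorded error.
 */
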
static int hib_wait_io(struct hib_bio_batch *hb)
{
	wait_event(hb->wait, atomic_read(&hb->count) == 0);
	return hb->error;
}

/*
 * Saving part
 */

static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
	int error;

	hib_submit_io(READ_SYNC, swsusp_resume_block, swsusp_header, NULL);
	if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
	    !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
		memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
		swsusp_header->image = handle->first_sector;
		swsusp_header->flags = flags;
		if (flags & SF_CRC32_MODE)
			swsusp_header->crc32 = handle->crc32;
		error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
				      swsusp_header, NULL);
	} else {
		printk(KERN_ERR "PM: Swap header not found!\n");
		error = -ENODEV;
	}
	return error;
}

/*
 * swsusp_swap_check - check if the resume device is a swap device
 * and get its index (if so)
 *
 * This is called before saving image
 */
static int swsusp_swap_check(void)
{
	int res;

	res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
			&hib_resume_bdev);
	if (res < 0)
		return res;

	root_swap = res;
	res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
	if (res)
		return res;

	res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
	if (res < 0)
		blkdev_put(hib_resume_bdev, FMODE_WRITE);

	return res;
}

/**
 * write_page - Write one page to given swap location.
 * @buf:    Address we're writing.
 * @offset: Offset of the swap page we're writing to.
 * @hb:     bio completion batch
 */
static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
{
	void *src;
	int ret;

	if (!offset)
		return -ENOSPC;

	if (hb) {
		src = (void *)__get_free_page(__GFP_RECLAIM | __GFP_NOWARN |
					      __GFP_NORETRY);
		if (!src) {
			ret = hib_wait_io(hb); /* Free pages */
			if (ret)
				return ret;
			src = (void *)__get_free_page(__GFP_RECLAIM |
						      __GFP_NOWARN |
						      __GFP_NORETRY);
		}
		if (src) {
			copy_page(src, buf);
		} else {
			hb = NULL;	/* Go synchronous */
			src = buf;
		}
	} else {
		src = buf;
	}
	return hib_submit_io(WRITE_SYNC, offset, src, hb);
}

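/*
 * Why the copy above: for batched (asynchronous) writes the caller is
 * free to reuse @buf as soon as write_page() returns, so an owned copy
 * of the page is handed to the bio layer instead; hib_end_io() frees
 * that copy with put_page() once the write completes. Only when no
 * spare page can be allocated does the write fall back to a synchronous
 * submission of @buf itself.
 */
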
static void release_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur)
		free_page((unsigned long)handle->cur);
	handle->cur = NULL;
}

static int get_swap_writer(struct swap_map_handle *handle)
{
	int ret;

	ret = swsusp_swap_check();
	if (ret) {
		if (ret != -ENOSPC)
			printk(KERN_ERR "PM: Cannot find swap device, try "
					"swapon -a.\n");
		return ret;
	}
	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
	if (!handle->cur) {
		ret = -ENOMEM;
		goto err_close;
	}
	handle->cur_swap = alloc_swapdev_block(root_swap);
	if (!handle->cur_swap) {
		ret = -ENOSPC;
		goto err_rel;
	}
	handle->k = 0;
	handle->reqd_free_pages = reqd_free_pages();
	handle->first_sector = handle->cur_swap;
	return 0;
err_rel:
	release_swap_writer(handle);
err_close:
	swsusp_close(FMODE_WRITE);
	return ret;
}

static int swap_write_page(struct swap_map_handle *handle, void *buf,
		struct hib_bio_batch *hb)
{
	int error = 0;
	sector_t offset;

	if (!handle->cur)
		return -EINVAL;
	offset = alloc_swapdev_block(root_swap);
	error = write_page(buf, offset, hb);
	if (error)
		return error;
	handle->cur->entries[handle->k++] = offset;
	if (handle->k >= MAP_PAGE_ENTRIES) {
		offset = alloc_swapdev_block(root_swap);
		if (!offset)
			return -ENOSPC;
		handle->cur->next_swap = offset;
		error = write_page(handle->cur, handle->cur_swap, hb);
		if (error)
			goto out;
		clear_page(handle->cur);
		handle->cur_swap = offset;
		handle->k = 0;

		if (hb && low_free_pages() <= handle->reqd_free_pages) {
			error = hib_wait_io(hb);
			if (error)
				goto out;
			/*
			 * Recalculate the number of required free pages, to
			 * make sure we never take more than half.
			 */
			handle->reqd_free_pages = reqd_free_pages();
		}
	}
 out:
	return error;
}

static int flush_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur && handle->cur_swap)
		return write_page(handle->cur, handle->cur_swap, NULL);
	else
		return -EINVAL;
}

static int swap_writer_finish(struct swap_map_handle *handle,
		unsigned int flags, int error)
{
	if (!error) {
		flush_swap_writer(handle);
		printk(KERN_INFO "PM: S");
		error = mark_swapfiles(handle, flags);
		printk("|\n");
	}

	if (error)
		free_all_swap_pages(root_swap);
	release_swap_writer(handle);
	swsusp_close(FMODE_WRITE);

	return error;
}

/* We need to remember how much compressed data we need to read. */
#define LZO_HEADER	sizeof(size_t)

/* Number of pages/bytes we'll compress at one time. */
#define LZO_UNC_PAGES	32
#define LZO_UNC_SIZE	(LZO_UNC_PAGES * PAGE_SIZE)

/* Number of pages/bytes we need for compressed data (worst case). */
#define LZO_CMP_PAGES	DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
				     LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE	(LZO_CMP_PAGES * PAGE_SIZE)

/* Maximum number of threads for compression/decompression. */
#define LZO_THREADS	3

/* Minimum/maximum number of pages for read buffering. */
#define LZO_MIN_RD_PAGES	1024
#define LZO_MAX_RD_PAGES	8192

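/*
 * Worked example of the sizing above (illustrative, assuming 4 KiB
 * pages): LZO_UNC_SIZE = 32 * 4096 = 128 KiB per compression block.
 * lzo1x_worst_compress(x) expands to x + x/16 + 64 + 3, so the worst
 * case is 131072 + 8192 + 67 = 139331 bytes; adding the LZO_HEADER
 * length field and rounding up to whole pages gives LZO_CMP_PAGES = 35,
 * i.e. a 140 KiB compressed-output buffer per thread.
 */
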
/**
 * save_image - save the suspend image data
 */
static int save_image(struct swap_map_handle *handle,
		      struct snapshot_handle *snapshot,
		      unsigned int nr_to_write)
{
	unsigned int m;
	int ret;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;

	hib_init_batch(&hb);

	printk(KERN_INFO "PM: Saving image data pages (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	while (1) {
		ret = snapshot_read_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_write_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			printk(KERN_INFO "PM: Image saving progress: %3d%%\n",
			       nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		printk(KERN_INFO "PM: Image saving done.\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
	return ret;
}

/*
 * Structure used for CRC32.
 */
struct crc_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	unsigned run_threads;                     /* nr current threads */
	wait_queue_head_t go;                     /* start crc update */
	wait_queue_head_t done;                   /* crc update done */
	u32 *crc32;                               /* points to handle's crc32 */
	size_t *unc_len[LZO_THREADS];             /* uncompressed lengths */
	unsigned char *unc[LZO_THREADS];          /* uncompressed data */
};

/*
 * CRC32 update function that runs in its own thread.
 */
static int crc32_threadfn(void *data)
{
	struct crc_data *d = data;
	unsigned i;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		for (i = 0; i < d->run_threads; i++)
			*d->crc32 = crc32_le(*d->crc32,
			                     d->unc[i], *d->unc_len[i]);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

/*
 * Structure used for LZO data compression.
 */
struct cmp_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	int ret;                                  /* return code */
	wait_queue_head_t go;                     /* start compression */
	wait_queue_head_t done;                   /* compression done */
	size_t unc_len;                           /* uncompressed length */
	size_t cmp_len;                           /* compressed length */
	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
	unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
};

/*
 * Compression function that runs in its own thread.
 */
static int lzo_compress_threadfn(void *data)
{
	struct cmp_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->ret = lzo1x_1_compress(d->unc, d->unc_len,
		                          d->cmp + LZO_HEADER, &d->cmp_len,
		                          d->wrk);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

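/*
 * Handshake between the main thread and the worker threads above: the
 * producer fills d->unc, sets d->ready and wakes d->go; the worker
 * clears d->ready, does its work, sets d->stop and wakes d->done; the
 * producer then waits on d->done and clears d->stop before reusing the
 * slot. The same ready/stop + go/done protocol is used by the CRC32
 * and decompression threads.
 */
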
/**
 * save_image_lzo - Save the suspend image data compressed with LZO.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 */
static int save_image_lzo(struct swap_map_handle *handle,
			  struct snapshot_handle *snapshot,
			  unsigned int nr_to_write)
{
	unsigned int m;
	int ret = 0;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	size_t off;
	unsigned thr, run_threads, nr_threads;
	unsigned char *page = NULL;
	struct cmp_data *data = NULL;
	struct crc_data *crc = NULL;

	hib_init_batch(&hb);

	/*
	 * We'll limit the number of threads for compression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = (void *)__get_free_page(__GFP_RECLAIM | __GFP_HIGH);
	if (!page) {
		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vmalloc(sizeof(*data) * nr_threads);
	if (!data) {
		printk(KERN_ERR "PM: Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	for (thr = 0; thr < nr_threads; thr++)
		memset(&data[thr], 0, offsetof(struct cmp_data, go));

	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		printk(KERN_ERR "PM: Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	memset(crc, 0, offsetof(struct crc_data, go));

	/*
	 * Start the compression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_compress_threadfn,
		                            &data[thr],
		                            "image_compress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			printk(KERN_ERR
			       "PM: Cannot start compression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Adjust the number of required free pages after all allocations have
	 * been done. We don't want to run out of pages when writing.
	 */
	handle->reqd_free_pages = reqd_free_pages();

	printk(KERN_INFO
		"PM: Using %u thread(s) for compression.\n"
		"PM: Compressing and saving image data (%u pages)...\n",
		nr_threads, nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for (;;) {
		for (thr = 0; thr < nr_threads; thr++) {
			for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
				ret = snapshot_read_next(snapshot);
				if (ret < 0)
					goto out_finish;
				if (!ret)
					break;

				memcpy(data[thr].unc + off,
				       data_of(*snapshot), PAGE_SIZE);

				if (!(nr_pages % m))
					printk(KERN_INFO
					       "PM: Image saving progress: "
					       "%3d%%\n",
					       nr_pages / m * 10);
				nr_pages++;
			}
			if (!off)
				break;

			data[thr].unc_len = off;

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		if (!thr)
			break;

		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
			           atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;
			if (ret < 0) {
				printk(KERN_ERR "PM: LZO compression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].cmp_len ||
			             data[thr].cmp_len >
			             lzo1x_worst_compress(data[thr].unc_len))) {
				printk(KERN_ERR
				       "PM: Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			*(size_t *)data[thr].cmp = data[thr].cmp_len;

			/*
			 * Given we are writing one page at a time to disk, we
			 * copy that much from the buffer, although the last
			 * bit will likely be smaller than full page. This is
			 * OK - we saved the length of the compressed data, so
			 * any garbage at the end will be discarded when we
			 * read it.
			 */
			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(page, data[thr].cmp + off, PAGE_SIZE);

				ret = swap_write_page(handle, page, &hb);
				if (ret)
					goto out_finish;
			}
		}

		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}

out_finish:
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		printk(KERN_INFO "PM: Image saving done.\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
out_clean:
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	if (page) free_page((unsigned long)page);

	return ret;
}

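/*
 * On-disk format produced above (as implied by the code, for reference):
 * each compressed block is stored as a size_t length field (LZO_HEADER)
 * immediately followed by that many bytes of LZO data, with the whole
 * record padded out to a page boundary. The saved length is what lets
 * the loader below discard the padding garbage in the final page.
 */
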
/**
 * enough_swap - Make sure we have enough swap to save the image.
 *
 * Returns TRUE or FALSE after checking the total amount of swap
 * space available from the resume partition.
 */
static int enough_swap(unsigned int nr_pages, unsigned int flags)
{
	unsigned int free_swap = count_swap_pages(root_swap, 1);
	unsigned int required;

	pr_debug("PM: Free swap pages: %u\n", free_swap);

	required = PAGES_FOR_IO + nr_pages;
	return free_swap > required;
}

/**
 * swsusp_write - Write entire image and metadata.
 * @flags: flags to pass to the "boot" kernel in the image header
 *
 * It is important _NOT_ to umount filesystems at this point. We want
 * them synced (in case something goes wrong) but we DO NOT want to mark
 * filesystem clean: it is not. (And it does not matter, if we resume
 * correctly, we'll mark system clean, anyway.)
 */

int swsusp_write(unsigned int flags)
{
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;
	unsigned long pages;
	int error;

	pages = snapshot_get_image_size();
	error = get_swap_writer(&handle);
	if (error) {
		printk(KERN_ERR "PM: Cannot get swap writer\n");
		return error;
	}
	if (flags & SF_NOCOMPRESS_MODE) {
		if (!enough_swap(pages, flags)) {
			printk(KERN_ERR "PM: Not enough free swap\n");
			error = -ENOSPC;
			goto out_finish;
		}
	}
	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_read_next(&snapshot);
	if (error < PAGE_SIZE) {
		if (error >= 0)
			error = -EFAULT;
		goto out_finish;
	}
	header = (struct swsusp_info *)data_of(snapshot);
	error = swap_write_page(&handle, header, NULL);
	if (!error) {
		error = (flags & SF_NOCOMPRESS_MODE) ?
			save_image(&handle, &snapshot, pages - 1) :
			save_image_lzo(&handle, &snapshot, pages - 1);
	}
out_finish:
	error = swap_writer_finish(&handle, flags, error);
	return error;
}

/*
 * The following functions allow us to read data using a swap map
 * in a file-alike way.
 */

static void release_swap_reader(struct swap_map_handle *handle)
{
	struct swap_map_page_list *tmp;

	while (handle->maps) {
		if (handle->maps->map)
			free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
	}
	handle->cur = NULL;
}

static int get_swap_reader(struct swap_map_handle *handle,
		unsigned int *flags_p)
{
	int error;
	struct swap_map_page_list *tmp, *last;
	sector_t offset;

	*flags_p = swsusp_header->flags;

	if (!swsusp_header->image) /* how can this happen? */
		return -EINVAL;

	handle->cur = NULL;
	last = handle->maps = NULL;
	offset = swsusp_header->image;
	while (offset) {
		tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL);
		if (!tmp) {
			release_swap_reader(handle);
			return -ENOMEM;
		}
		memset(tmp, 0, sizeof(*tmp));
		if (!handle->maps)
			handle->maps = tmp;
		if (last)
			last->next = tmp;
		last = tmp;

		tmp->map = (struct swap_map_page *)
			   __get_free_page(__GFP_RECLAIM | __GFP_HIGH);
		if (!tmp->map) {
			release_swap_reader(handle);
			return -ENOMEM;
		}

		error = hib_submit_io(READ_SYNC, offset, tmp->map, NULL);
		if (error) {
			release_swap_reader(handle);
			return error;
		}
		offset = tmp->map->next_swap;
	}
	handle->k = 0;
	handle->cur = handle->maps->map;
	return 0;
}

static int swap_read_page(struct swap_map_handle *handle, void *buf,
		struct hib_bio_batch *hb)
{
	sector_t offset;
	int error;
	struct swap_map_page_list *tmp;

	if (!handle->cur)
		return -EINVAL;
	offset = handle->cur->entries[handle->k];
	if (!offset)
		return -EFAULT;
	error = hib_submit_io(READ_SYNC, offset, buf, hb);
	if (error)
		return error;
	if (++handle->k >= MAP_PAGE_ENTRIES) {
		handle->k = 0;
		free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
		if (!handle->maps)
			release_swap_reader(handle);
		else
			handle->cur = handle->maps->map;
	}
	return error;
}

static int swap_reader_finish(struct swap_map_handle *handle)
{
	release_swap_reader(handle);

	return 0;
}

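/*
 * Reader design, in short: get_swap_reader() walks the .next_swap chain
 * and loads every swap_map_page into the handle->maps list up front, so
 * swap_read_page() can then hand out data sectors strictly sequentially,
 * dropping each exhausted map page as it goes - which is what gives the
 * "file-alike" forward-only read semantics noted above.
 */
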
/**
 * load_image - load the image using the swap map handle
 * @handle and the snapshot handle @snapshot
 * (assume there are @nr_to_read pages to load)
 */

static int load_image(struct swap_map_handle *handle,
		      struct snapshot_handle *snapshot,
		      unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	ktime_t start;
	ktime_t stop;
	struct hib_bio_batch hb;
	int err2;
	unsigned nr_pages;

	hib_init_batch(&hb);

	clean_pages_on_read = true;
	printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n",
		nr_to_read);
	m = nr_to_read / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for (;;) {
		ret = snapshot_write_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_read_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (snapshot->sync_read)
			ret = hib_wait_io(&hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			printk(KERN_INFO "PM: Image loading progress: %3d%%\n",
			       nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret) {
		printk(KERN_INFO "PM: Image loading done.\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
	return ret;
}

/*
 * Structure used for LZO data decompression.
 */
struct dec_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	int ret;                                  /* return code */
	wait_queue_head_t go;                     /* start decompression */
	wait_queue_head_t done;                   /* decompression done */
	size_t unc_len;                           /* uncompressed length */
	size_t cmp_len;                           /* compressed length */
	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
};

/*
 * Decompression function that runs in its own thread.
 */
static int lzo_decompress_threadfn(void *data)
{
	struct dec_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->unc_len = LZO_UNC_SIZE;
		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
		                               d->unc, &d->unc_len);
		if (clean_pages_on_decompress)
			flush_icache_range((unsigned long)d->unc,
					   (unsigned long)d->unc + d->unc_len);

		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

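/*
 * A note on the read-ahead scheme used below (a summary of the code,
 * not a specification): page[] is treated as a ring buffer of up to
 * ring_size read-ahead pages; "ring" is the producer index for pages
 * being read from swap and "pg" the consumer index for pages fed to
 * the decompressor threads, while have/asked/want count pages already
 * buffered, in flight, and still wanted respectively.
 */
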
/**
 * load_image_lzo - Load compressed image data and decompress them with LZO.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Image to copy uncompressed data into.
 * @nr_to_read: Number of pages to load.
 */
static int load_image_lzo(struct swap_map_handle *handle,
			  struct snapshot_handle *snapshot,
			  unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	int eof = 0;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	unsigned nr_pages;
	size_t off;
	unsigned i, thr, run_threads, nr_threads;
	unsigned ring = 0, pg = 0, ring_size = 0,
	         have = 0, want, need, asked = 0;
	unsigned long read_pages = 0;
	unsigned char **page = NULL;
	struct dec_data *data = NULL;
	struct crc_data *crc = NULL;

	hib_init_batch(&hb);

	/*
	 * We'll limit the number of threads for decompression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
	if (!page) {
		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vmalloc(sizeof(*data) * nr_threads);
	if (!data) {
		printk(KERN_ERR "PM: Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	for (thr = 0; thr < nr_threads; thr++)
		memset(&data[thr], 0, offsetof(struct dec_data, go));

	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		printk(KERN_ERR "PM: Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	memset(crc, 0, offsetof(struct crc_data, go));

	clean_pages_on_decompress = true;

	/*
	 * Start the decompression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_decompress_threadfn,
		                            &data[thr],
		                            "image_decompress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			printk(KERN_ERR
			       "PM: Cannot start decompression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Set the number of pages for read buffering.
	 * This is complete guesswork, because we'll only know the real
	 * picture once prepare_image() is called, which is much later on
	 * during the image load phase. We'll assume the worst case and
	 * say that none of the image pages are from high memory.
	 */
	if (low_free_pages() > snapshot_get_image_size())
		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);

	for (i = 0; i < read_pages; i++) {
		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
						  __GFP_RECLAIM | __GFP_HIGH :
						  __GFP_RECLAIM | __GFP_NOWARN |
						  __GFP_NORETRY);

		if (!page[i]) {
			if (i < LZO_CMP_PAGES) {
				ring_size = i;
				printk(KERN_ERR
				       "PM: Failed to allocate LZO pages\n");
				ret = -ENOMEM;
				goto out_clean;
			} else {
				break;
			}
		}
	}
	want = ring_size = i;

	printk(KERN_INFO
		"PM: Using %u thread(s) for decompression.\n"
		"PM: Loading and decompressing image data (%u pages)...\n",
		nr_threads, nr_to_read);
	m = nr_to_read / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();

	ret = snapshot_write_next(snapshot);
	if (ret <= 0)
		goto out_finish;

	for (;;) {
		for (i = 0; !eof && i < want; i++) {
			ret = swap_read_page(handle, page[ring], &hb);
			if (ret) {
				/*
				 * On real read error, finish. On end of data,
				 * set EOF flag and just exit the read loop.
				 */
				if (handle->cur &&
				    handle->cur->entries[handle->k]) {
					goto out_finish;
				} else {
					eof = 1;
					break;
				}
			}
			if (++ring >= ring_size)
				ring = 0;
		}
		asked += i;
		want -= i;

		/*
		 * We are out of data, wait for some more.
		 */
		if (!have) {
			if (!asked)
				break;

			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		if (crc->run_threads) {
			wait_event(crc->done, atomic_read(&crc->stop));
			atomic_set(&crc->stop, 0);
			crc->run_threads = 0;
		}

		for (thr = 0; have && thr < nr_threads; thr++) {
			data[thr].cmp_len = *(size_t *)page[pg];
			if (unlikely(!data[thr].cmp_len ||
			             data[thr].cmp_len >
			             lzo1x_worst_compress(LZO_UNC_SIZE))) {
				printk(KERN_ERR
				       "PM: Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
			                    PAGE_SIZE);
			if (need > have) {
				if (eof > 1) {
					ret = -1;
					goto out_finish;
				}
				break;
			}

			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(data[thr].cmp + off,
				       page[pg], PAGE_SIZE);
				have--;
				want++;
				if (++pg >= ring_size)
					pg = 0;
			}

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		/*
		 * Wait for more data while we are decompressing.
		 */
		if (have < LZO_CMP_PAGES && asked) {
			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
			           atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;
			if (ret < 0) {
				printk(KERN_ERR
				       "PM: LZO decompression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].unc_len ||
				     data[thr].unc_len > LZO_UNC_SIZE ||
				     data[thr].unc_len & (PAGE_SIZE - 1))) {
				printk(KERN_ERR
				       "PM: Invalid LZO uncompressed length\n");
				ret = -1;
				goto out_finish;
			}

			for (off = 0;
			     off < data[thr].unc_len; off += PAGE_SIZE) {
				memcpy(data_of(*snapshot),
				       data[thr].unc + off, PAGE_SIZE);

				if (!(nr_pages % m))
					printk(KERN_INFO
					       "PM: Image loading progress: "
					       "%3d%%\n",
					       nr_pages / m * 10);
				nr_pages++;

				ret = snapshot_write_next(snapshot);
				if (ret <= 0) {
					crc->run_threads = thr + 1;
					atomic_set(&crc->ready, 1);
					wake_up(&crc->go);
					goto out_finish;
				}
			}
		}

		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);
	}

out_finish:
	if (crc->run_threads) {
		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}
	stop = ktime_get();
	if (!ret) {
		printk(KERN_INFO "PM: Image loading done.\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
		if (!ret) {
			if (swsusp_header->flags & SF_CRC32_MODE) {
				if (handle->crc32 != swsusp_header->crc32) {
					printk(KERN_ERR
					       "PM: Invalid image CRC32!\n");
					ret = -ENODATA;
				}
			}
		}
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
out_clean:
	for (i = 0; i < ring_size; i++)
		free_page((unsigned long)page[i]);
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	vfree(page);

	return ret;
}

/**
 * swsusp_read - read the hibernation image.
 * @flags_p: flags passed by the "frozen" kernel in the image header should
 *           be written into this memory location
 */

int swsusp_read(unsigned int *flags_p)
{
	int error;
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;

	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_write_next(&snapshot);
	if (error < PAGE_SIZE)
		return error < 0 ? error : -EFAULT;
	header = (struct swsusp_info *)data_of(snapshot);
	error = get_swap_reader(&handle, flags_p);
	if (error)
		goto end;
	if (!error)
		error = swap_read_page(&handle, header, NULL);
	if (!error) {
		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
			load_image(&handle, &snapshot, header->pages - 1) :
			load_image_lzo(&handle, &snapshot, header->pages - 1);
	}
	swap_reader_finish(&handle);
end:
	if (!error)
		pr_debug("PM: Image successfully loaded\n");
	else
		pr_debug("PM: Error %d resuming\n", error);
	return error;
}

/**
 * swsusp_check - Check for swsusp signature in the resume device
 */

int swsusp_check(void)
{
	int error;

	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
					    FMODE_READ, NULL);
	if (!IS_ERR(hib_resume_bdev)) {
		set_blocksize(hib_resume_bdev, PAGE_SIZE);
		clear_page(swsusp_header);
		error = hib_submit_io(READ_SYNC, swsusp_resume_block,
					swsusp_header, NULL);
		if (error)
			goto put;

		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
			/* Reset swap signature now */
			error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
						swsusp_header, NULL);
		} else {
			error = -EINVAL;
		}

put:
		if (error)
			blkdev_put(hib_resume_bdev, FMODE_READ);
		else
			pr_debug("PM: Image signature found, resuming\n");
	} else {
		error = PTR_ERR(hib_resume_bdev);
	}

	if (error)
		pr_debug("PM: Image not found (code %d)\n", error);

	return error;
}

/**
 * swsusp_close - close swap device.
 */

void swsusp_close(fmode_t mode)
{
	if (IS_ERR(hib_resume_bdev)) {
		pr_debug("PM: Image device not initialised\n");
		return;
	}

	blkdev_put(hib_resume_bdev, mode);
}

/**
 * swsusp_unmark - Unmark swsusp signature in the resume device
 */

#ifdef CONFIG_SUSPEND
int swsusp_unmark(void)
{
	int error;

	hib_submit_io(READ_SYNC, swsusp_resume_block, swsusp_header, NULL);
	if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
		memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
		error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
					swsusp_header, NULL);
	} else {
		printk(KERN_ERR "PM: Cannot find swsusp signature!\n");
		error = -ENODEV;
	}

	/*
	 * We just returned from suspend, we don't need the image any more.
	 */
	free_all_swap_pages(root_swap);

	return error;
}
#endif

static int swsusp_header_init(void)
{
	swsusp_header = (struct swsusp_header *) __get_free_page(GFP_KERNEL);
	if (!swsusp_header)
		panic("Could not allocate memory for swsusp_header\n");
	return 0;
}

core_initcall(swsusp_header_init);