  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * linux/kernel/power/swap.c
  4. *
  5. * This file provides functions for reading the suspend image from
  6. * and writing it to a swap partition.
  7. *
  8. * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
  9. * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
  10. * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
  11. */
  12. #define pr_fmt(fmt) "PM: " fmt
  13. #include <linux/module.h>
  14. #include <linux/file.h>
  15. #include <linux/delay.h>
  16. #include <linux/bitops.h>
  17. #include <linux/genhd.h>
  18. #include <linux/device.h>
  19. #include <linux/bio.h>
  20. #include <linux/blkdev.h>
  21. #include <linux/swap.h>
  22. #include <linux/swapops.h>
  23. #include <linux/pm.h>
  24. #include <linux/slab.h>
  25. #include <linux/lzo.h>
  26. #include <linux/vmalloc.h>
  27. #include <linux/cpumask.h>
  28. #include <linux/atomic.h>
  29. #include <linux/kthread.h>
  30. #include <linux/crc32.h>
  31. #include <linux/ktime.h>
  32. #include "power.h"
  33. #define HIBERNATE_SIG "S1SUSPEND"
  34. /*
  35. * When reading an {un,}compressed image, we may restore pages in place,
  36. * in which case some architectures need these pages to be cleaned before they
  37. * can be executed. We don't know which pages these may be, so we clean the lot.
  38. */
  39. static bool clean_pages_on_read;
  40. static bool clean_pages_on_decompress;
  41. /*
  42. * The swap map is a data structure used for keeping track of each page
  43. * written to a swap partition. It consists of many swap_map_page
  44. * structures that contain each an array of MAP_PAGE_ENTRIES swap entries.
  45. * These structures are stored on the swap and linked together with the
  46. * help of the .next_swap member.
  47. *
  48. * The swap map is created during suspend. The swap map pages are
  49. * allocated and populated one at a time, so we only need one memory
  50. * page to set up the entire structure.
  51. *
  52. * During resume we pick up all swap_map_page structures into a list.
  53. */
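/*
 * Illustrative layout: each swap_map_page occupies one swap page and chains
 * to the next one through .next_swap (0 terminates the chain):
 *
 *   map page A                        map page B
 *   entries[0..MAP_PAGE_ENTRIES-1]    entries[0..MAP_PAGE_ENTRIES-1]
 *     -> sectors of saved data pages    -> sectors of saved data pages
 *   next_swap -> sector of B          next_swap -> 0
 */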
  54. #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)
  55. /*
  56. * Number of free pages that are not high.
  57. */
  58. static inline unsigned long low_free_pages(void)
  59. {
  60. return nr_free_pages() - nr_free_highpages();
  61. }
  62. /*
  63. * Number of pages required to be kept free while writing the image. Always
  64. * half of all available low pages before the writing starts.
  65. */
  66. static inline unsigned long reqd_free_pages(void)
  67. {
  68. return low_free_pages() / 2;
  69. }
  70. struct swap_map_page {
  71. sector_t entries[MAP_PAGE_ENTRIES];
  72. sector_t next_swap;
  73. };
  74. struct swap_map_page_list {
  75. struct swap_map_page *map;
  76. struct swap_map_page_list *next;
  77. };
  78. /**
  79. * The swap_map_handle structure is used for handling swap in
  80. * a file-like way
  81. */
  82. struct swap_map_handle {
  83. struct swap_map_page *cur;
  84. struct swap_map_page_list *maps;
  85. sector_t cur_swap;
  86. sector_t first_sector;
  87. unsigned int k;
  88. unsigned long reqd_free_pages;
  89. u32 crc32;
  90. };
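/*
 * The swsusp_header below overlays the swap header page at
 * swsusp_resume_block. While an image is present, .sig holds HIBERNATE_SIG,
 * .orig_sig preserves the original "SWAP-SPACE"/"SWAPSPACE2" signature so it
 * can be restored on resume, and .image points at the first swap_map_page of
 * the saved image.
 */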
  91. struct swsusp_header {
  92. char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
  93. sizeof(u32)];
  94. u32 crc32;
  95. sector_t image;
  96. unsigned int flags; /* Flags to pass to the "boot" kernel */
  97. char orig_sig[10];
  98. char sig[10];
  99. } __packed;
  100. static struct swsusp_header *swsusp_header;
  101. /**
  102. * The following functions are used for tracking the allocated
  103. * swap pages, so that they can be freed in case of an error.
  104. */
  105. struct swsusp_extent {
  106. struct rb_node node;
  107. unsigned long start;
  108. unsigned long end;
  109. };
  110. static struct rb_root swsusp_extents = RB_ROOT;
  111. static int swsusp_extents_insert(unsigned long swap_offset)
  112. {
  113. struct rb_node **new = &(swsusp_extents.rb_node);
  114. struct rb_node *parent = NULL;
  115. struct swsusp_extent *ext;
  116. /* Figure out where to put the new node */
  117. while (*new) {
  118. ext = rb_entry(*new, struct swsusp_extent, node);
  119. parent = *new;
  120. if (swap_offset < ext->start) {
  121. /* Try to merge */
  122. if (swap_offset == ext->start - 1) {
  123. ext->start--;
  124. return 0;
  125. }
  126. new = &((*new)->rb_left);
  127. } else if (swap_offset > ext->end) {
  128. /* Try to merge */
  129. if (swap_offset == ext->end + 1) {
  130. ext->end++;
  131. return 0;
  132. }
  133. new = &((*new)->rb_right);
  134. } else {
  135. /* It already is in the tree */
  136. return -EINVAL;
  137. }
  138. }
  139. /* Add the new node and rebalance the tree. */
  140. ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
  141. if (!ext)
  142. return -ENOMEM;
  143. ext->start = swap_offset;
  144. ext->end = swap_offset;
  145. rb_link_node(&ext->node, parent, new);
  146. rb_insert_color(&ext->node, &swsusp_extents);
  147. return 0;
  148. }
  149. /**
  150. * alloc_swapdev_block - allocate a swap page and register that it has
  151. * been allocated, so that it can be freed in case of an error.
  152. */
  153. sector_t alloc_swapdev_block(int swap)
  154. {
  155. unsigned long offset;
  156. offset = swp_offset(get_swap_page_of_type(swap));
  157. if (offset) {
  158. if (swsusp_extents_insert(offset))
  159. swap_free(swp_entry(swap, offset));
  160. else
  161. return swapdev_block(swap, offset);
  162. }
  163. return 0;
  164. }
  165. /**
  166. * free_all_swap_pages - free swap pages allocated for saving image data.
  167. * It also frees the extents used to register which swap entries had been
  168. * allocated.
  169. */
  170. void free_all_swap_pages(int swap)
  171. {
  172. struct rb_node *node;
  173. while ((node = swsusp_extents.rb_node)) {
  174. struct swsusp_extent *ext;
  175. unsigned long offset;
  176. ext = rb_entry(node, struct swsusp_extent, node);
  177. rb_erase(node, &swsusp_extents);
  178. for (offset = ext->start; offset <= ext->end; offset++)
  179. swap_free(swp_entry(swap, offset));
  180. kfree(ext);
  181. }
  182. }
  183. int swsusp_swap_in_use(void)
  184. {
  185. return (swsusp_extents.rb_node != NULL);
  186. }
  187. /*
  188. * General things
  189. */
  190. static unsigned short root_swap = 0xffff;
  191. static struct block_device *hib_resume_bdev;
  192. struct hib_bio_batch {
  193. atomic_t count;
  194. wait_queue_head_t wait;
  195. blk_status_t error;
  196. struct blk_plug plug;
  197. };
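/*
 * A hib_bio_batch tracks a group of asynchronously submitted bios:
 * .count is the number still in flight, .wait is woken when it drops to
 * zero, .error records the first failure seen, and .plug keeps the
 * submissions batched until the caller sleeps or finishes the batch.
 */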
  198. static void hib_init_batch(struct hib_bio_batch *hb)
  199. {
  200. atomic_set(&hb->count, 0);
  201. init_waitqueue_head(&hb->wait);
  202. hb->error = BLK_STS_OK;
  203. blk_start_plug(&hb->plug);
  204. }
  205. static void hib_finish_batch(struct hib_bio_batch *hb)
  206. {
  207. blk_finish_plug(&hb->plug);
  208. }
  209. static void hib_end_io(struct bio *bio)
  210. {
  211. struct hib_bio_batch *hb = bio->bi_private;
  212. struct page *page = bio_first_page_all(bio);
  213. if (bio->bi_status) {
  214. pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
  215. MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
  216. (unsigned long long)bio->bi_iter.bi_sector);
  217. }
  218. if (bio_data_dir(bio) == WRITE)
  219. put_page(page);
  220. else if (clean_pages_on_read)
  221. flush_icache_range((unsigned long)page_address(page),
  222. (unsigned long)page_address(page) + PAGE_SIZE);
  223. if (bio->bi_status && !hb->error)
  224. hb->error = bio->bi_status;
  225. if (atomic_dec_and_test(&hb->count))
  226. wake_up(&hb->wait);
  227. bio_put(bio);
  228. }
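/*
 * Read or write one page at the given swap page offset on the resume
 * device. With a non-NULL @hb the bio is submitted asynchronously and
 * completed through hib_end_io(); otherwise the call waits for the I/O.
 */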
  229. static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
  230. struct hib_bio_batch *hb)
  231. {
  232. struct page *page = virt_to_page(addr);
  233. struct bio *bio;
  234. int error = 0;
  235. bio = bio_alloc(GFP_NOIO | __GFP_HIGH, 1);
  236. bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
  237. bio_set_dev(bio, hib_resume_bdev);
  238. bio_set_op_attrs(bio, op, op_flags);
  239. if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
  240. pr_err("Adding page to bio failed at %llu\n",
  241. (unsigned long long)bio->bi_iter.bi_sector);
  242. bio_put(bio);
  243. return -EFAULT;
  244. }
  245. if (hb) {
  246. bio->bi_end_io = hib_end_io;
  247. bio->bi_private = hb;
  248. atomic_inc(&hb->count);
  249. submit_bio(bio);
  250. } else {
  251. error = submit_bio_wait(bio);
  252. bio_put(bio);
  253. }
  254. return error;
  255. }
  256. static int hib_wait_io(struct hib_bio_batch *hb)
  257. {
  258. /*
  259. * We are relying on the behavior of blk_plug that a thread with
  260. * a plug will flush the plug list before sleeping.
  261. */
  262. wait_event(hb->wait, atomic_read(&hb->count) == 0);
  263. return blk_status_to_errno(hb->error);
  264. }
  265. /*
  266. * Saving part
  267. */
  268. static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
  269. {
  270. int error;
  271. hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
  272. swsusp_header, NULL);
  273. if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
  274. !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
  275. memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
  276. memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
  277. swsusp_header->image = handle->first_sector;
  278. swsusp_header->flags = flags;
  279. if (flags & SF_CRC32_MODE)
  280. swsusp_header->crc32 = handle->crc32;
  281. error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
  282. swsusp_resume_block, swsusp_header, NULL);
  283. } else {
  284. pr_err("Swap header not found!\n");
  285. error = -ENODEV;
  286. }
  287. return error;
  288. }
  289. /**
  290. * swsusp_swap_check - check if the resume device is a swap device
  291. * and get its index (if so)
  292. *
  293. * This is called before saving the image.
  294. */
  295. static int swsusp_swap_check(void)
  296. {
  297. int res;
  298. if (swsusp_resume_device)
  299. res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
  300. else
  301. res = find_first_swap(&swsusp_resume_device);
  302. if (res < 0)
  303. return res;
  304. root_swap = res;
  305. hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, FMODE_WRITE,
  306. NULL);
  307. if (IS_ERR(hib_resume_bdev))
  308. return PTR_ERR(hib_resume_bdev);
  309. res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
  310. if (res < 0)
  311. blkdev_put(hib_resume_bdev, FMODE_WRITE);
  312. return res;
  313. }
  314. /**
  315. * write_page - Write one page to given swap location.
  316. * @buf: Address we're writing.
  317. * @offset: Offset of the swap page we're writing to.
  318. * @hb: bio completion batch
  319. */
  320. static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
  321. {
  322. void *src;
  323. int ret;
  324. if (!offset)
  325. return -ENOSPC;
  326. if (hb) {
  327. src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
  328. __GFP_NORETRY);
  329. if (src) {
  330. copy_page(src, buf);
  331. } else {
  332. ret = hib_wait_io(hb); /* Free pages */
  333. if (ret)
  334. return ret;
  335. src = (void *)__get_free_page(GFP_NOIO |
  336. __GFP_NOWARN |
  337. __GFP_NORETRY);
  338. if (src) {
  339. copy_page(src, buf);
  340. } else {
  341. WARN_ON_ONCE(1);
  342. hb = NULL; /* Go synchronous */
  343. src = buf;
  344. }
  345. }
  346. } else {
  347. src = buf;
  348. }
  349. return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb);
  350. }
  351. static void release_swap_writer(struct swap_map_handle *handle)
  352. {
  353. if (handle->cur)
  354. free_page((unsigned long)handle->cur);
  355. handle->cur = NULL;
  356. }
  357. static int get_swap_writer(struct swap_map_handle *handle)
  358. {
  359. int ret;
  360. ret = swsusp_swap_check();
  361. if (ret) {
  362. if (ret != -ENOSPC)
  363. pr_err("Cannot find swap device, try swapon -a\n");
  364. return ret;
  365. }
  366. handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
  367. if (!handle->cur) {
  368. ret = -ENOMEM;
  369. goto err_close;
  370. }
  371. handle->cur_swap = alloc_swapdev_block(root_swap);
  372. if (!handle->cur_swap) {
  373. ret = -ENOSPC;
  374. goto err_rel;
  375. }
  376. handle->k = 0;
  377. handle->reqd_free_pages = reqd_free_pages();
  378. handle->first_sector = handle->cur_swap;
  379. return 0;
  380. err_rel:
  381. release_swap_writer(handle);
  382. err_close:
  383. swsusp_close(FMODE_WRITE);
  384. return ret;
  385. }
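/*
 * Write one data page to a freshly allocated swap page and record its
 * sector in the current swap_map_page. When the map page fills up, it is
 * written out to its own swap slot and a new, empty one is started. With
 * asynchronous I/O the writer throttles itself by waiting for outstanding
 * bios whenever free low memory falls below the reserved amount.
 */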
  386. static int swap_write_page(struct swap_map_handle *handle, void *buf,
  387. struct hib_bio_batch *hb)
  388. {
  389. int error = 0;
  390. sector_t offset;
  391. if (!handle->cur)
  392. return -EINVAL;
  393. offset = alloc_swapdev_block(root_swap);
  394. error = write_page(buf, offset, hb);
  395. if (error)
  396. return error;
  397. handle->cur->entries[handle->k++] = offset;
  398. if (handle->k >= MAP_PAGE_ENTRIES) {
  399. offset = alloc_swapdev_block(root_swap);
  400. if (!offset)
  401. return -ENOSPC;
  402. handle->cur->next_swap = offset;
  403. error = write_page(handle->cur, handle->cur_swap, hb);
  404. if (error)
  405. goto out;
  406. clear_page(handle->cur);
  407. handle->cur_swap = offset;
  408. handle->k = 0;
  409. if (hb && low_free_pages() <= handle->reqd_free_pages) {
  410. error = hib_wait_io(hb);
  411. if (error)
  412. goto out;
  413. /*
  414. * Recalculate the number of required free pages, to
  415. * make sure we never take more than half.
  416. */
  417. handle->reqd_free_pages = reqd_free_pages();
  418. }
  419. }
  420. out:
  421. return error;
  422. }
  423. static int flush_swap_writer(struct swap_map_handle *handle)
  424. {
  425. if (handle->cur && handle->cur_swap)
  426. return write_page(handle->cur, handle->cur_swap, NULL);
  427. else
  428. return -EINVAL;
  429. }
  430. static int swap_writer_finish(struct swap_map_handle *handle,
  431. unsigned int flags, int error)
  432. {
  433. if (!error) {
  434. pr_info("S");
  435. error = mark_swapfiles(handle, flags);
  436. pr_cont("|\n");
  437. flush_swap_writer(handle);
  438. }
  439. if (error)
  440. free_all_swap_pages(root_swap);
  441. release_swap_writer(handle);
  442. swsusp_close(FMODE_WRITE);
  443. return error;
  444. }
  445. /* We need to remember how much compressed data we need to read. */
  446. #define LZO_HEADER sizeof(size_t)
  447. /* Number of pages/bytes we'll compress at one time. */
  448. #define LZO_UNC_PAGES 32
  449. #define LZO_UNC_SIZE (LZO_UNC_PAGES * PAGE_SIZE)
  450. /* Number of pages/bytes we need for compressed data (worst case). */
  451. #define LZO_CMP_PAGES DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
  452. LZO_HEADER, PAGE_SIZE)
  453. #define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE)
  454. /* Maximum number of threads for compression/decompression. */
  455. #define LZO_THREADS 3
  456. /* Minimum/maximum number of pages for read buffering. */
  457. #define LZO_MIN_RD_PAGES 1024
  458. #define LZO_MAX_RD_PAGES 8192
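/*
 * Sizing sketch, assuming 4 KiB pages (PAGE_SIZE is architecture dependent):
 * each compression unit covers LZO_UNC_PAGES * 4 KiB = 128 KiB of snapshot
 * data, and LZO_CMP_SIZE reserves the worst-case LZO output for that unit
 * plus the LZO_HEADER length field, rounded up to whole pages.
 */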
  459. /**
  460. * save_image - save the suspend image data
  461. */
  462. static int save_image(struct swap_map_handle *handle,
  463. struct snapshot_handle *snapshot,
  464. unsigned int nr_to_write)
  465. {
  466. unsigned int m;
  467. int ret;
  468. int nr_pages;
  469. int err2;
  470. struct hib_bio_batch hb;
  471. ktime_t start;
  472. ktime_t stop;
  473. hib_init_batch(&hb);
  474. pr_info("Saving image data pages (%u pages)...\n",
  475. nr_to_write);
  476. m = nr_to_write / 10;
  477. if (!m)
  478. m = 1;
  479. nr_pages = 0;
  480. start = ktime_get();
  481. while (1) {
  482. ret = snapshot_read_next(snapshot);
  483. if (ret <= 0)
  484. break;
  485. ret = swap_write_page(handle, data_of(*snapshot), &hb);
  486. if (ret)
  487. break;
  488. if (!(nr_pages % m))
  489. pr_info("Image saving progress: %3d%%\n",
  490. nr_pages / m * 10);
  491. nr_pages++;
  492. }
  493. err2 = hib_wait_io(&hb);
  494. hib_finish_batch(&hb);
  495. stop = ktime_get();
  496. if (!ret)
  497. ret = err2;
  498. if (!ret)
  499. pr_info("Image saving done\n");
  500. swsusp_show_speed(start, stop, nr_to_write, "Wrote");
  501. return ret;
  502. }
  503. /**
  504. * Structure used for CRC32.
  505. */
  506. struct crc_data {
  507. struct task_struct *thr; /* thread */
  508. atomic_t ready; /* ready to start flag */
  509. atomic_t stop; /* ready to stop flag */
  510. unsigned run_threads; /* nr current threads */
  511. wait_queue_head_t go; /* start crc update */
  512. wait_queue_head_t done; /* crc update done */
  513. u32 *crc32; /* points to handle's crc32 */
  514. size_t *unc_len[LZO_THREADS]; /* uncompressed lengths */
  515. unsigned char *unc[LZO_THREADS]; /* uncompressed data */
  516. };
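/*
 * The CRC32 is computed over the uncompressed data in each worker's buffer,
 * so the checksum stored in (and later checked against)
 * swsusp_header->crc32 is independent of the compressed representation.
 */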
  517. /**
  518. * CRC32 update function that runs in its own thread.
  519. */
  520. static int crc32_threadfn(void *data)
  521. {
  522. struct crc_data *d = data;
  523. unsigned i;
  524. while (1) {
  525. wait_event(d->go, atomic_read(&d->ready) ||
  526. kthread_should_stop());
  527. if (kthread_should_stop()) {
  528. d->thr = NULL;
  529. atomic_set(&d->stop, 1);
  530. wake_up(&d->done);
  531. break;
  532. }
  533. atomic_set(&d->ready, 0);
  534. for (i = 0; i < d->run_threads; i++)
  535. *d->crc32 = crc32_le(*d->crc32,
  536. d->unc[i], *d->unc_len[i]);
  537. atomic_set(&d->stop, 1);
  538. wake_up(&d->done);
  539. }
  540. return 0;
  541. }
  542. /**
  543. * Structure used for LZO data compression.
  544. */
  545. struct cmp_data {
  546. struct task_struct *thr; /* thread */
  547. atomic_t ready; /* ready to start flag */
  548. atomic_t stop; /* ready to stop flag */
  549. int ret; /* return code */
  550. wait_queue_head_t go; /* start compression */
  551. wait_queue_head_t done; /* compression done */
  552. size_t unc_len; /* uncompressed length */
  553. size_t cmp_len; /* compressed length */
  554. unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */
  555. unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */
  556. unsigned char wrk[LZO1X_1_MEM_COMPRESS]; /* compression workspace */
  557. };
  558. /**
  559. * Compression function that runs in its own thread.
  560. */
  561. static int lzo_compress_threadfn(void *data)
  562. {
  563. struct cmp_data *d = data;
  564. while (1) {
  565. wait_event(d->go, atomic_read(&d->ready) ||
  566. kthread_should_stop());
  567. if (kthread_should_stop()) {
  568. d->thr = NULL;
  569. d->ret = -1;
  570. atomic_set(&d->stop, 1);
  571. wake_up(&d->done);
  572. break;
  573. }
  574. atomic_set(&d->ready, 0);
  575. d->ret = lzo1x_1_compress(d->unc, d->unc_len,
  576. d->cmp + LZO_HEADER, &d->cmp_len,
  577. d->wrk);
  578. atomic_set(&d->stop, 1);
  579. wake_up(&d->done);
  580. }
  581. return 0;
  582. }
  583. /**
  584. * save_image_lzo - Save the suspend image data compressed with LZO.
  585. * @handle: Swap map handle to use for saving the image.
  586. * @snapshot: Image to read data from.
  587. * @nr_to_write: Number of pages to save.
  588. */
  589. static int save_image_lzo(struct swap_map_handle *handle,
  590. struct snapshot_handle *snapshot,
  591. unsigned int nr_to_write)
  592. {
  593. unsigned int m;
  594. int ret = 0;
  595. int nr_pages;
  596. int err2;
  597. struct hib_bio_batch hb;
  598. ktime_t start;
  599. ktime_t stop;
  600. size_t off;
  601. unsigned thr, run_threads, nr_threads;
  602. unsigned char *page = NULL;
  603. struct cmp_data *data = NULL;
  604. struct crc_data *crc = NULL;
  605. hib_init_batch(&hb);
  606. /*
  607. * We'll limit the number of threads for compression to limit memory
  608. * footprint.
  609. */
  610. nr_threads = num_online_cpus() - 1;
  611. nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
  612. page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
  613. if (!page) {
  614. pr_err("Failed to allocate LZO page\n");
  615. ret = -ENOMEM;
  616. goto out_clean;
  617. }
  618. data = vmalloc(array_size(nr_threads, sizeof(*data)));
  619. if (!data) {
  620. pr_err("Failed to allocate LZO data\n");
  621. ret = -ENOMEM;
  622. goto out_clean;
  623. }
  624. for (thr = 0; thr < nr_threads; thr++)
  625. memset(&data[thr], 0, offsetof(struct cmp_data, go));
  626. crc = kmalloc(sizeof(*crc), GFP_KERNEL);
  627. if (!crc) {
  628. pr_err("Failed to allocate crc\n");
  629. ret = -ENOMEM;
  630. goto out_clean;
  631. }
  632. memset(crc, 0, offsetof(struct crc_data, go));
  633. /*
  634. * Start the compression threads.
  635. */
  636. for (thr = 0; thr < nr_threads; thr++) {
  637. init_waitqueue_head(&data[thr].go);
  638. init_waitqueue_head(&data[thr].done);
  639. data[thr].thr = kthread_run(lzo_compress_threadfn,
  640. &data[thr],
  641. "image_compress/%u", thr);
  642. if (IS_ERR(data[thr].thr)) {
  643. data[thr].thr = NULL;
  644. pr_err("Cannot start compression threads\n");
  645. ret = -ENOMEM;
  646. goto out_clean;
  647. }
  648. }
  649. /*
  650. * Start the CRC32 thread.
  651. */
  652. init_waitqueue_head(&crc->go);
  653. init_waitqueue_head(&crc->done);
  654. handle->crc32 = 0;
  655. crc->crc32 = &handle->crc32;
  656. for (thr = 0; thr < nr_threads; thr++) {
  657. crc->unc[thr] = data[thr].unc;
  658. crc->unc_len[thr] = &data[thr].unc_len;
  659. }
  660. crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
  661. if (IS_ERR(crc->thr)) {
  662. crc->thr = NULL;
  663. pr_err("Cannot start CRC32 thread\n");
  664. ret = -ENOMEM;
  665. goto out_clean;
  666. }
  667. /*
  668. * Adjust the number of required free pages after all allocations have
  669. * been done. We don't want to run out of pages when writing.
  670. */
  671. handle->reqd_free_pages = reqd_free_pages();
  672. pr_info("Using %u thread(s) for compression\n", nr_threads);
  673. pr_info("Compressing and saving image data (%u pages)...\n",
  674. nr_to_write);
  675. m = nr_to_write / 10;
  676. if (!m)
  677. m = 1;
  678. nr_pages = 0;
  679. start = ktime_get();
  680. for (;;) {
  681. for (thr = 0; thr < nr_threads; thr++) {
  682. for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
  683. ret = snapshot_read_next(snapshot);
  684. if (ret < 0)
  685. goto out_finish;
  686. if (!ret)
  687. break;
  688. memcpy(data[thr].unc + off,
  689. data_of(*snapshot), PAGE_SIZE);
  690. if (!(nr_pages % m))
  691. pr_info("Image saving progress: %3d%%\n",
  692. nr_pages / m * 10);
  693. nr_pages++;
  694. }
  695. if (!off)
  696. break;
  697. data[thr].unc_len = off;
  698. atomic_set(&data[thr].ready, 1);
  699. wake_up(&data[thr].go);
  700. }
  701. if (!thr)
  702. break;
  703. crc->run_threads = thr;
  704. atomic_set(&crc->ready, 1);
  705. wake_up(&crc->go);
  706. for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
  707. wait_event(data[thr].done,
  708. atomic_read(&data[thr].stop));
  709. atomic_set(&data[thr].stop, 0);
  710. ret = data[thr].ret;
  711. if (ret < 0) {
  712. pr_err("LZO compression failed\n");
  713. goto out_finish;
  714. }
  715. if (unlikely(!data[thr].cmp_len ||
  716. data[thr].cmp_len >
  717. lzo1x_worst_compress(data[thr].unc_len))) {
  718. pr_err("Invalid LZO compressed length\n");
  719. ret = -1;
  720. goto out_finish;
  721. }
  722. *(size_t *)data[thr].cmp = data[thr].cmp_len;
  723. /*
  724. * Given we are writing one page at a time to disk, we
  725. * copy that much from the buffer, although the last
  726. * bit will likely be smaller than full page. This is
  727. * OK - we saved the length of the compressed data, so
  728. * any garbage at the end will be discarded when we
  729. * read it.
  730. */
  731. for (off = 0;
  732. off < LZO_HEADER + data[thr].cmp_len;
  733. off += PAGE_SIZE) {
  734. memcpy(page, data[thr].cmp + off, PAGE_SIZE);
  735. ret = swap_write_page(handle, page, &hb);
  736. if (ret)
  737. goto out_finish;
  738. }
  739. }
  740. wait_event(crc->done, atomic_read(&crc->stop));
  741. atomic_set(&crc->stop, 0);
  742. }
  743. out_finish:
  744. err2 = hib_wait_io(&hb);
  745. stop = ktime_get();
  746. if (!ret)
  747. ret = err2;
  748. if (!ret)
  749. pr_info("Image saving done\n");
  750. swsusp_show_speed(start, stop, nr_to_write, "Wrote");
  751. out_clean:
  752. hib_finish_batch(&hb);
  753. if (crc) {
  754. if (crc->thr)
  755. kthread_stop(crc->thr);
  756. kfree(crc);
  757. }
  758. if (data) {
  759. for (thr = 0; thr < nr_threads; thr++)
  760. if (data[thr].thr)
  761. kthread_stop(data[thr].thr);
  762. vfree(data);
  763. }
  764. if (page) free_page((unsigned long)page);
  765. return ret;
  766. }
  767. /**
  768. * enough_swap - Make sure we have enough swap to save the image.
  769. *
  770. * Returns TRUE or FALSE after checking the total amount of swap
  771. * space available from the resume partition.
  772. */
  773. static int enough_swap(unsigned int nr_pages)
  774. {
  775. unsigned int free_swap = count_swap_pages(root_swap, 1);
  776. unsigned int required;
  777. pr_debug("Free swap pages: %u\n", free_swap);
  778. required = PAGES_FOR_IO + nr_pages;
  779. return free_swap > required;
  780. }
  781. /**
  782. * swsusp_write - Write entire image and metadata.
  783. * @flags: flags to pass to the "boot" kernel in the image header
  784. *
  785. * It is important _NOT_ to unmount filesystems at this point. We want
  786. * them synced (in case something goes wrong) but we do NOT want to mark
  787. * the filesystems clean: they are not. (And it does not matter; if we resume
  788. * correctly, we'll mark the system clean anyway.)
  789. */
  790. int swsusp_write(unsigned int flags)
  791. {
  792. struct swap_map_handle handle;
  793. struct snapshot_handle snapshot;
  794. struct swsusp_info *header;
  795. unsigned long pages;
  796. int error;
  797. pages = snapshot_get_image_size();
  798. error = get_swap_writer(&handle);
  799. if (error) {
  800. pr_err("Cannot get swap writer\n");
  801. return error;
  802. }
  803. if (flags & SF_NOCOMPRESS_MODE) {
  804. if (!enough_swap(pages)) {
  805. pr_err("Not enough free swap\n");
  806. error = -ENOSPC;
  807. goto out_finish;
  808. }
  809. }
  810. memset(&snapshot, 0, sizeof(struct snapshot_handle));
  811. error = snapshot_read_next(&snapshot);
  812. if (error < (int)PAGE_SIZE) {
  813. if (error >= 0)
  814. error = -EFAULT;
  815. goto out_finish;
  816. }
  817. header = (struct swsusp_info *)data_of(snapshot);
  818. error = swap_write_page(&handle, header, NULL);
  819. if (!error) {
  820. error = (flags & SF_NOCOMPRESS_MODE) ?
  821. save_image(&handle, &snapshot, pages - 1) :
  822. save_image_lzo(&handle, &snapshot, pages - 1);
  823. }
  824. out_finish:
  825. error = swap_writer_finish(&handle, flags, error);
  826. return error;
  827. }
  828. /**
  829. * The following functions allow us to read data using a swap map
  830. * in a file-like way
  831. */
  832. static void release_swap_reader(struct swap_map_handle *handle)
  833. {
  834. struct swap_map_page_list *tmp;
  835. while (handle->maps) {
  836. if (handle->maps->map)
  837. free_page((unsigned long)handle->maps->map);
  838. tmp = handle->maps;
  839. handle->maps = handle->maps->next;
  840. kfree(tmp);
  841. }
  842. handle->cur = NULL;
  843. }
  844. static int get_swap_reader(struct swap_map_handle *handle,
  845. unsigned int *flags_p)
  846. {
  847. int error;
  848. struct swap_map_page_list *tmp, *last;
  849. sector_t offset;
  850. *flags_p = swsusp_header->flags;
  851. if (!swsusp_header->image) /* how can this happen? */
  852. return -EINVAL;
  853. handle->cur = NULL;
  854. last = handle->maps = NULL;
  855. offset = swsusp_header->image;
  856. while (offset) {
  857. tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);
  858. if (!tmp) {
  859. release_swap_reader(handle);
  860. return -ENOMEM;
  861. }
  862. if (!handle->maps)
  863. handle->maps = tmp;
  864. if (last)
  865. last->next = tmp;
  866. last = tmp;
  867. tmp->map = (struct swap_map_page *)
  868. __get_free_page(GFP_NOIO | __GFP_HIGH);
  869. if (!tmp->map) {
  870. release_swap_reader(handle);
  871. return -ENOMEM;
  872. }
  873. error = hib_submit_io(REQ_OP_READ, 0, offset, tmp->map, NULL);
  874. if (error) {
  875. release_swap_reader(handle);
  876. return error;
  877. }
  878. offset = tmp->map->next_swap;
  879. }
  880. handle->k = 0;
  881. handle->cur = handle->maps->map;
  882. return 0;
  883. }
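/*
 * Read the page referenced by the current swap map entry into @buf
 * (asynchronously when @hb is given). Once all entries of the current
 * swap_map_page have been consumed, that map page is freed and reading
 * continues from the next one in the list.
 */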
  884. static int swap_read_page(struct swap_map_handle *handle, void *buf,
  885. struct hib_bio_batch *hb)
  886. {
  887. sector_t offset;
  888. int error;
  889. struct swap_map_page_list *tmp;
  890. if (!handle->cur)
  891. return -EINVAL;
  892. offset = handle->cur->entries[handle->k];
  893. if (!offset)
  894. return -EFAULT;
  895. error = hib_submit_io(REQ_OP_READ, 0, offset, buf, hb);
  896. if (error)
  897. return error;
  898. if (++handle->k >= MAP_PAGE_ENTRIES) {
  899. handle->k = 0;
  900. free_page((unsigned long)handle->maps->map);
  901. tmp = handle->maps;
  902. handle->maps = handle->maps->next;
  903. kfree(tmp);
  904. if (!handle->maps)
  905. release_swap_reader(handle);
  906. else
  907. handle->cur = handle->maps->map;
  908. }
  909. return error;
  910. }
  911. static int swap_reader_finish(struct swap_map_handle *handle)
  912. {
  913. release_swap_reader(handle);
  914. return 0;
  915. }
  916. /**
  917. * load_image - load the image using the swap map handle
  918. * @handle and the snapshot handle @snapshot
  919. * (assume there are @nr_to_read pages to load)
  920. */
  921. static int load_image(struct swap_map_handle *handle,
  922. struct snapshot_handle *snapshot,
  923. unsigned int nr_to_read)
  924. {
  925. unsigned int m;
  926. int ret = 0;
  927. ktime_t start;
  928. ktime_t stop;
  929. struct hib_bio_batch hb;
  930. int err2;
  931. unsigned nr_pages;
  932. hib_init_batch(&hb);
  933. clean_pages_on_read = true;
  934. pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
  935. m = nr_to_read / 10;
  936. if (!m)
  937. m = 1;
  938. nr_pages = 0;
  939. start = ktime_get();
  940. for ( ; ; ) {
  941. ret = snapshot_write_next(snapshot);
  942. if (ret <= 0)
  943. break;
  944. ret = swap_read_page(handle, data_of(*snapshot), &hb);
  945. if (ret)
  946. break;
  947. if (snapshot->sync_read)
  948. ret = hib_wait_io(&hb);
  949. if (ret)
  950. break;
  951. if (!(nr_pages % m))
  952. pr_info("Image loading progress: %3d%%\n",
  953. nr_pages / m * 10);
  954. nr_pages++;
  955. }
  956. err2 = hib_wait_io(&hb);
  957. hib_finish_batch(&hb);
  958. stop = ktime_get();
  959. if (!ret)
  960. ret = err2;
  961. if (!ret) {
  962. pr_info("Image loading done\n");
  963. snapshot_write_finalize(snapshot);
  964. if (!snapshot_image_loaded(snapshot))
  965. ret = -ENODATA;
  966. }
  967. swsusp_show_speed(start, stop, nr_to_read, "Read");
  968. return ret;
  969. }
  970. /**
  971. * Structure used for LZO data decompression.
  972. */
  973. struct dec_data {
  974. struct task_struct *thr; /* thread */
  975. atomic_t ready; /* ready to start flag */
  976. atomic_t stop; /* ready to stop flag */
  977. int ret; /* return code */
  978. wait_queue_head_t go; /* start decompression */
  979. wait_queue_head_t done; /* decompression done */
  980. size_t unc_len; /* uncompressed length */
  981. size_t cmp_len; /* compressed length */
  982. unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */
  983. unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */
  984. };
  985. /**
  986. * Decompression function that runs in its own thread.
  987. */
  988. static int lzo_decompress_threadfn(void *data)
  989. {
  990. struct dec_data *d = data;
  991. while (1) {
  992. wait_event(d->go, atomic_read(&d->ready) ||
  993. kthread_should_stop());
  994. if (kthread_should_stop()) {
  995. d->thr = NULL;
  996. d->ret = -1;
  997. atomic_set(&d->stop, 1);
  998. wake_up(&d->done);
  999. break;
  1000. }
  1001. atomic_set(&d->ready, 0);
  1002. d->unc_len = LZO_UNC_SIZE;
  1003. d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
  1004. d->unc, &d->unc_len);
  1005. if (clean_pages_on_decompress)
  1006. flush_icache_range((unsigned long)d->unc,
  1007. (unsigned long)d->unc + d->unc_len);
  1008. atomic_set(&d->stop, 1);
  1009. wake_up(&d->done);
  1010. }
  1011. return 0;
  1012. }
  1013. /**
  1014. * load_image_lzo - Load compressed image data and decompress them with LZO.
  1015. * @handle: Swap map handle to use for loading data.
  1016. * @snapshot: Image to copy uncompressed data into.
  1017. * @nr_to_read: Number of pages to load.
  1018. */
  1019. static int load_image_lzo(struct swap_map_handle *handle,
  1020. struct snapshot_handle *snapshot,
  1021. unsigned int nr_to_read)
  1022. {
  1023. unsigned int m;
  1024. int ret = 0;
  1025. int eof = 0;
  1026. struct hib_bio_batch hb;
  1027. ktime_t start;
  1028. ktime_t stop;
  1029. unsigned nr_pages;
  1030. size_t off;
  1031. unsigned i, thr, run_threads, nr_threads;
  1032. unsigned ring = 0, pg = 0, ring_size = 0,
  1033. have = 0, want, need, asked = 0;
  1034. unsigned long read_pages = 0;
  1035. unsigned char **page = NULL;
  1036. struct dec_data *data = NULL;
  1037. struct crc_data *crc = NULL;
  1038. hib_init_batch(&hb);
  1039. /*
  1040. * We'll limit the number of threads for decompression to limit memory
  1041. * footprint.
  1042. */
  1043. nr_threads = num_online_cpus() - 1;
  1044. nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
  1045. page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
  1046. if (!page) {
  1047. pr_err("Failed to allocate LZO page\n");
  1048. ret = -ENOMEM;
  1049. goto out_clean;
  1050. }
  1051. data = vmalloc(array_size(nr_threads, sizeof(*data)));
  1052. if (!data) {
  1053. pr_err("Failed to allocate LZO data\n");
  1054. ret = -ENOMEM;
  1055. goto out_clean;
  1056. }
  1057. for (thr = 0; thr < nr_threads; thr++)
  1058. memset(&data[thr], 0, offsetof(struct dec_data, go));
  1059. crc = kmalloc(sizeof(*crc), GFP_KERNEL);
  1060. if (!crc) {
  1061. pr_err("Failed to allocate crc\n");
  1062. ret = -ENOMEM;
  1063. goto out_clean;
  1064. }
  1065. memset(crc, 0, offsetof(struct crc_data, go));
  1066. clean_pages_on_decompress = true;
  1067. /*
  1068. * Start the decompression threads.
  1069. */
  1070. for (thr = 0; thr < nr_threads; thr++) {
  1071. init_waitqueue_head(&data[thr].go);
  1072. init_waitqueue_head(&data[thr].done);
  1073. data[thr].thr = kthread_run(lzo_decompress_threadfn,
  1074. &data[thr],
  1075. "image_decompress/%u", thr);
  1076. if (IS_ERR(data[thr].thr)) {
  1077. data[thr].thr = NULL;
  1078. pr_err("Cannot start decompression threads\n");
  1079. ret = -ENOMEM;
  1080. goto out_clean;
  1081. }
  1082. }
  1083. /*
  1084. * Start the CRC32 thread.
  1085. */
  1086. init_waitqueue_head(&crc->go);
  1087. init_waitqueue_head(&crc->done);
  1088. handle->crc32 = 0;
  1089. crc->crc32 = &handle->crc32;
  1090. for (thr = 0; thr < nr_threads; thr++) {
  1091. crc->unc[thr] = data[thr].unc;
  1092. crc->unc_len[thr] = &data[thr].unc_len;
  1093. }
  1094. crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
  1095. if (IS_ERR(crc->thr)) {
  1096. crc->thr = NULL;
  1097. pr_err("Cannot start CRC32 thread\n");
  1098. ret = -ENOMEM;
  1099. goto out_clean;
  1100. }
  1101. /*
  1102. * Set the number of pages for read buffering.
  1103. * This is complete guesswork, because we'll only know the real
  1104. * picture once prepare_image() is called, which is much later on
  1105. * during the image load phase. We'll assume the worst case and
  1106. * say that none of the image pages are from high memory.
  1107. */
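/*
 * A worked example under the assumption of 4 KiB pages: half of the low
 * pages left over after subtracting the image size are used for the ring,
 * clamped to [LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES], i.e. roughly 4 MiB to
 * 32 MiB of read-ahead buffers.
 */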
  1108. if (low_free_pages() > snapshot_get_image_size())
  1109. read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
  1110. read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
  1111. for (i = 0; i < read_pages; i++) {
  1112. page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
  1113. GFP_NOIO | __GFP_HIGH :
  1114. GFP_NOIO | __GFP_NOWARN |
  1115. __GFP_NORETRY);
  1116. if (!page[i]) {
  1117. if (i < LZO_CMP_PAGES) {
  1118. ring_size = i;
  1119. pr_err("Failed to allocate LZO pages\n");
  1120. ret = -ENOMEM;
  1121. goto out_clean;
  1122. } else {
  1123. break;
  1124. }
  1125. }
  1126. }
  1127. want = ring_size = i;
  1128. pr_info("Using %u thread(s) for decompression\n", nr_threads);
  1129. pr_info("Loading and decompressing image data (%u pages)...\n",
  1130. nr_to_read);
  1131. m = nr_to_read / 10;
  1132. if (!m)
  1133. m = 1;
  1134. nr_pages = 0;
  1135. start = ktime_get();
  1136. ret = snapshot_write_next(snapshot);
  1137. if (ret <= 0)
  1138. goto out_finish;
  1139. for(;;) {
  1140. for (i = 0; !eof && i < want; i++) {
  1141. ret = swap_read_page(handle, page[ring], &hb);
  1142. if (ret) {
  1143. /*
  1144. * On real read error, finish. On end of data,
  1145. * set EOF flag and just exit the read loop.
  1146. */
  1147. if (handle->cur &&
  1148. handle->cur->entries[handle->k]) {
  1149. goto out_finish;
  1150. } else {
  1151. eof = 1;
  1152. break;
  1153. }
  1154. }
  1155. if (++ring >= ring_size)
  1156. ring = 0;
  1157. }
  1158. asked += i;
  1159. want -= i;
  1160. /*
  1161. * We are out of data, wait for some more.
  1162. */
  1163. if (!have) {
  1164. if (!asked)
  1165. break;
  1166. ret = hib_wait_io(&hb);
  1167. if (ret)
  1168. goto out_finish;
  1169. have += asked;
  1170. asked = 0;
  1171. if (eof)
  1172. eof = 2;
  1173. }
  1174. if (crc->run_threads) {
  1175. wait_event(crc->done, atomic_read(&crc->stop));
  1176. atomic_set(&crc->stop, 0);
  1177. crc->run_threads = 0;
  1178. }
  1179. for (thr = 0; have && thr < nr_threads; thr++) {
  1180. data[thr].cmp_len = *(size_t *)page[pg];
  1181. if (unlikely(!data[thr].cmp_len ||
  1182. data[thr].cmp_len >
  1183. lzo1x_worst_compress(LZO_UNC_SIZE))) {
  1184. pr_err("Invalid LZO compressed length\n");
  1185. ret = -1;
  1186. goto out_finish;
  1187. }
  1188. need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
  1189. PAGE_SIZE);
  1190. if (need > have) {
  1191. if (eof > 1) {
  1192. ret = -1;
  1193. goto out_finish;
  1194. }
  1195. break;
  1196. }
  1197. for (off = 0;
  1198. off < LZO_HEADER + data[thr].cmp_len;
  1199. off += PAGE_SIZE) {
  1200. memcpy(data[thr].cmp + off,
  1201. page[pg], PAGE_SIZE);
  1202. have--;
  1203. want++;
  1204. if (++pg >= ring_size)
  1205. pg = 0;
  1206. }
  1207. atomic_set(&data[thr].ready, 1);
  1208. wake_up(&data[thr].go);
  1209. }
  1210. /*
  1211. * Wait for more data while we are decompressing.
  1212. */
  1213. if (have < LZO_CMP_PAGES && asked) {
  1214. ret = hib_wait_io(&hb);
  1215. if (ret)
  1216. goto out_finish;
  1217. have += asked;
  1218. asked = 0;
  1219. if (eof)
  1220. eof = 2;
  1221. }
  1222. for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
  1223. wait_event(data[thr].done,
  1224. atomic_read(&data[thr].stop));
  1225. atomic_set(&data[thr].stop, 0);
  1226. ret = data[thr].ret;
  1227. if (ret < 0) {
  1228. pr_err("LZO decompression failed\n");
  1229. goto out_finish;
  1230. }
  1231. if (unlikely(!data[thr].unc_len ||
  1232. data[thr].unc_len > LZO_UNC_SIZE ||
  1233. data[thr].unc_len & (PAGE_SIZE - 1))) {
  1234. pr_err("Invalid LZO uncompressed length\n");
  1235. ret = -1;
  1236. goto out_finish;
  1237. }
  1238. for (off = 0;
  1239. off < data[thr].unc_len; off += PAGE_SIZE) {
  1240. memcpy(data_of(*snapshot),
  1241. data[thr].unc + off, PAGE_SIZE);
  1242. if (!(nr_pages % m))
  1243. pr_info("Image loading progress: %3d%%\n",
  1244. nr_pages / m * 10);
  1245. nr_pages++;
  1246. ret = snapshot_write_next(snapshot);
  1247. if (ret <= 0) {
  1248. crc->run_threads = thr + 1;
  1249. atomic_set(&crc->ready, 1);
  1250. wake_up(&crc->go);
  1251. goto out_finish;
  1252. }
  1253. }
  1254. }
  1255. crc->run_threads = thr;
  1256. atomic_set(&crc->ready, 1);
  1257. wake_up(&crc->go);
  1258. }
  1259. out_finish:
  1260. if (crc->run_threads) {
  1261. wait_event(crc->done, atomic_read(&crc->stop));
  1262. atomic_set(&crc->stop, 0);
  1263. }
  1264. stop = ktime_get();
  1265. if (!ret) {
  1266. pr_info("Image loading done\n");
  1267. snapshot_write_finalize(snapshot);
  1268. if (!snapshot_image_loaded(snapshot))
  1269. ret = -ENODATA;
  1270. if (!ret) {
  1271. if (swsusp_header->flags & SF_CRC32_MODE) {
  1272. if (handle->crc32 != swsusp_header->crc32) {
  1273. pr_err("Invalid image CRC32!\n");
  1274. ret = -ENODATA;
  1275. }
  1276. }
  1277. }
  1278. }
  1279. swsusp_show_speed(start, stop, nr_to_read, "Read");
  1280. out_clean:
  1281. hib_finish_batch(&hb);
  1282. for (i = 0; i < ring_size; i++)
  1283. free_page((unsigned long)page[i]);
  1284. if (crc) {
  1285. if (crc->thr)
  1286. kthread_stop(crc->thr);
  1287. kfree(crc);
  1288. }
  1289. if (data) {
  1290. for (thr = 0; thr < nr_threads; thr++)
  1291. if (data[thr].thr)
  1292. kthread_stop(data[thr].thr);
  1293. vfree(data);
  1294. }
  1295. vfree(page);
  1296. return ret;
  1297. }
  1298. /**
  1299. * swsusp_read - read the hibernation image.
  1300. * @flags_p: flags passed by the "frozen" kernel in the image header should
  1301. * be written into this memory location
  1302. */
  1303. int swsusp_read(unsigned int *flags_p)
  1304. {
  1305. int error;
  1306. struct swap_map_handle handle;
  1307. struct snapshot_handle snapshot;
  1308. struct swsusp_info *header;
  1309. memset(&snapshot, 0, sizeof(struct snapshot_handle));
  1310. error = snapshot_write_next(&snapshot);
  1311. if (error < (int)PAGE_SIZE)
  1312. return error < 0 ? error : -EFAULT;
  1313. header = (struct swsusp_info *)data_of(snapshot);
  1314. error = get_swap_reader(&handle, flags_p);
  1315. if (error)
  1316. goto end;
  1317. if (!error)
  1318. error = swap_read_page(&handle, header, NULL);
  1319. if (!error) {
  1320. error = (*flags_p & SF_NOCOMPRESS_MODE) ?
  1321. load_image(&handle, &snapshot, header->pages - 1) :
  1322. load_image_lzo(&handle, &snapshot, header->pages - 1);
  1323. }
  1324. swap_reader_finish(&handle);
  1325. end:
  1326. if (!error)
  1327. pr_debug("Image successfully loaded\n");
  1328. else
  1329. pr_debug("Error %d resuming\n", error);
  1330. return error;
  1331. }
  1332. /**
  1333. * swsusp_check - Check for swsusp signature in the resume device
  1334. */
  1335. int swsusp_check(void)
  1336. {
  1337. int error;
  1338. void *holder;
  1339. hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
  1340. FMODE_READ | FMODE_EXCL, &holder);
  1341. if (!IS_ERR(hib_resume_bdev)) {
  1342. set_blocksize(hib_resume_bdev, PAGE_SIZE);
  1343. clear_page(swsusp_header);
  1344. error = hib_submit_io(REQ_OP_READ, 0,
  1345. swsusp_resume_block,
  1346. swsusp_header, NULL);
  1347. if (error)
  1348. goto put;
  1349. if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
  1350. memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
  1351. /* Reset swap signature now */
  1352. error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
  1353. swsusp_resume_block,
  1354. swsusp_header, NULL);
  1355. } else {
  1356. error = -EINVAL;
  1357. }
  1358. put:
  1359. if (error)
  1360. blkdev_put(hib_resume_bdev, FMODE_READ | FMODE_EXCL);
  1361. else
  1362. pr_debug("Image signature found, resuming\n");
  1363. } else {
  1364. error = PTR_ERR(hib_resume_bdev);
  1365. }
  1366. if (error)
  1367. pr_debug("Image not found (code %d)\n", error);
  1368. return error;
  1369. }
  1370. /**
  1371. * swsusp_close - close swap device.
  1372. */
  1373. void swsusp_close(fmode_t mode)
  1374. {
  1375. if (IS_ERR(hib_resume_bdev)) {
  1376. pr_debug("Image device not initialised\n");
  1377. return;
  1378. }
  1379. blkdev_put(hib_resume_bdev, mode);
  1380. }
  1381. /**
  1382. * swsusp_unmark - Unmark swsusp signature in the resume device
  1383. */
  1384. #ifdef CONFIG_SUSPEND
  1385. int swsusp_unmark(void)
  1386. {
  1387. int error;
  1388. hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
  1389. swsusp_header, NULL);
  1390. if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
  1391. memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
  1392. error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
  1393. swsusp_resume_block,
  1394. swsusp_header, NULL);
  1395. } else {
  1396. pr_err("Cannot find swsusp signature!\n");
  1397. error = -ENODEV;
  1398. }
  1399. /*
  1400. * We just returned from suspend, we don't need the image any more.
  1401. */
  1402. free_all_swap_pages(root_swap);
  1403. return error;
  1404. }
  1405. #endif
  1406. static int __init swsusp_header_init(void)
  1407. {
  1408. swsusp_header = (struct swsusp_header *)__get_free_page(GFP_KERNEL);
  1409. if (!swsusp_header)
  1410. panic("Could not allocate memory for swsusp_header\n");
  1411. return 0;
  1412. }
  1413. core_initcall(swsusp_header_init);