// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "compress.h"
#include <linux/module.h>
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
#endif

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)
#endif
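/*
 * Worked numbers for the macros above, assuming 4 KiB pages: an LZ4 match
 * can reference up to LZ4_DISTANCE_MAX = 65535 bytes behind the write
 * position, so the window can span DIV_ROUND_UP(65535, 4096) + 1 = 17
 * pages (the "+ 1" covers windows that straddle an extra page boundary).
 * For in-place decompression of a 64 KiB pcluster, the required safety
 * margin works out to (65536 >> 8) + 32 = 288 bytes of slack at the end
 * of the output buffer.
 */
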
struct z_erofs_decompressor {
	/*
	 * If destpages contain sparse (NULL) entries, fill them with bounce
	 * pages. It also checks whether destpages indicate contiguous
	 * physical memory.
	 */
	int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
				 struct list_head *pagepool);
	int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
	char *name;
};

int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb,
			    struct z_erofs_lz4_cfgs *lz4, int size)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);

		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
		if (!sbi->lz4.max_pclusterblks) {
			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
		} else if (sbi->lz4.max_pclusterblks >
			   Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) {
			erofs_err(sb, "too large lz4 pclusterblks %u",
				  sbi->lz4.max_pclusterblks);
			return -EINVAL;
		} else if (sbi->lz4.max_pclusterblks >= 2) {
			erofs_info(sb, "EXPERIMENTAL big pcluster feature in use. Use at your own risk!");
		}
	} else {
		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
		sbi->lz4.max_pclusterblks = 1;
	}

	sbi->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
	return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
}

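/*
 * The walk below keeps the last lz4_max_distance_pages output pages usable
 * at all times, since an LZ4 match may copy from up to LZ4_DISTANCE_MAX
 * bytes behind the current write position.  Holes (NULL entries) in rq->out
 * are plugged with short-lived bounce pages; once a bounce page falls out of
 * the match window, it is pushed onto the `availables` stack and reused for
 * a later hole instead of allocating a fresh page.
 */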
static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
					 struct list_head *pagepool)
{
	const unsigned int nr =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < nr; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (kaddr) {
				if (kaddr + PAGE_SIZE == page_address(page))
					kaddr += PAGE_SIZE;
				else
					kaddr = NULL;
			} else if (!i) {
				kaddr = page_address(page);
			}
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool,
						 GFP_KERNEL | __GFP_NOFAIL);
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;
}

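/*
 * Map the compressed input for decompression.  On return, *maptype records
 * how the source was mapped so that z_erofs_lz4_decompress() can undo it:
 *   0 - the single input page is already kmapped (inpage is returned);
 *   1 - multiple input pages were virtually mapped via erofs_vm_map_ram();
 *   2 - the (possibly overlapping) input was copied into a per-CPU buffer.
 */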
static void *z_erofs_handle_inplace_io(struct z_erofs_decompress_req *rq,
			void *inpage, unsigned int *inputmargin, int *maptype,
			bool support_0padding)
{
	unsigned int nrpages_in, nrpages_out;
	unsigned int ofull, oend, inputsize, total, i, j;
	struct page **in;
	void *src, *tmp;

	inputsize = rq->inputsize;
	nrpages_in = PAGE_ALIGN(inputsize) >> PAGE_SHIFT;
	oend = rq->pageofs_out + rq->outputsize;
	ofull = PAGE_ALIGN(oend);
	nrpages_out = ofull >> PAGE_SHIFT;

	if (rq->inplace_io) {
		if (rq->partial_decoding || !support_0padding ||
		    ofull - oend < LZ4_DECOMPRESS_INPLACE_MARGIN(inputsize))
			goto docopy;

		for (i = 0; i < nrpages_in; ++i) {
			DBG_BUGON(rq->in[i] == NULL);
			for (j = 0; j < nrpages_out - nrpages_in + i; ++j)
				if (rq->out[j] == rq->in[i])
					goto docopy;
		}
	}

	if (nrpages_in <= 1) {
		*maptype = 0;
		return inpage;
	}
	kunmap_atomic(inpage);
	might_sleep();
	src = erofs_vm_map_ram(rq->in, nrpages_in);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/*
	 * Otherwise, copy the compressed data, which may overlap the output
	 * pages, into the per-CPU buffer.
	 */
	in = rq->in;
	src = erofs_get_pcpubuf(nrpages_in);
	if (!src) {
		DBG_BUGON(1);
		kunmap_atomic(inpage);
		return ERR_PTR(-EFAULT);
	}

	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_atomic(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_atomic(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}

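/*
 * With the 0padding feature, compressed data is padded with leading zeroes
 * so that it ends exactly at the block boundary; the scan below skips those
 * zeroes to locate the real start of the LZ4 stream, and the resulting
 * inputmargin is what makes in-place decompression safe.
 */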
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
{
	unsigned int inputmargin;
	u8 *headpage, *src;
	bool support_0padding;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_atomic(*rq->in);
	inputmargin = 0;
	support_0padding = false;

	/* decompression inplace is only safe when 0padding is enabled */
	if (erofs_sb_has_lz4_0padding(EROFS_SB(rq->sb))) {
		support_0padding = true;

		while (!headpage[inputmargin & ~PAGE_MASK])
			if (!(++inputmargin & ~PAGE_MASK))
				break;

		if (inputmargin >= rq->inputsize) {
			kunmap_atomic(headpage);
			return -EIO;
		}
	}

	rq->inputsize -= inputmargin;
	src = z_erofs_handle_inplace_io(rq, headpage, &inputmargin, &maptype,
					support_0padding);
	if (IS_ERR(src))
		return PTR_ERR(src);

	/* legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);

		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, rq->inputsize, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);

		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EIO;
	}

	if (maptype == 0) {
		kunmap_atomic(src);
	} else if (maptype == 1) {
		vm_unmap_ram(src, PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT);
	} else if (maptype == 2) {
		erofs_put_pcpubuf(src);
	} else {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}

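/*
 * Indexed by the Z_EROFS_COMPRESSION_* value carried in rq->alg.  The
 * "shifted" entry needs no method hooks because z_erofs_decompress()
 * dispatches that case straight to z_erofs_shifted_transform() below.
 */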
static struct z_erofs_decompressor decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.prepare_destpages = z_erofs_lz4_prepare_destpages,
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
};

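/*
 * Scatter decompressed data from the per-CPU buffer back to the (possibly
 * sparse) output pages.  `cur` starts pageofs_out bytes before `dst` so each
 * loop iteration corresponds to exactly one output page; NULL entries in the
 * output array are holes and are simply skipped.
 */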
static void copy_from_pcpubuf(struct page **out, const char *dst,
			      unsigned short pageofs_out,
			      unsigned int outputsize)
{
	const char *end = dst + outputsize;
	const unsigned int righthalf = PAGE_SIZE - pageofs_out;
	const char *cur = dst - pageofs_out;

	while (cur < end) {
		struct page *const page = *out++;

		if (page) {
			char *buf = kmap_atomic(page);

			if (cur >= dst) {
				memcpy(buf, cur, min_t(uint, PAGE_SIZE,
						       end - cur));
			} else {
				memcpy(buf + pageofs_out, cur + pageofs_out,
				       min_t(uint, righthalf, end - cur));
			}
			kunmap_atomic(buf);
		}
		cur += PAGE_SIZE;
	}
}

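/*
 * In the generic path below, dst_maptype mirrors how the output buffer was
 * mapped: 0 - a single kmapped page; 1 - physically contiguous pages
 * addressed via page_address() (prepare_destpages returned 1); 2 - pages
 * virtually mapped with erofs_vm_map_ram().
 */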
static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
				      struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const struct z_erofs_decompressor *alg = decompressors + rq->alg;
	unsigned int dst_maptype;
	void *dst;
	int ret;

	/* two optimized fast paths, currently for non-bigpcluster cases only */
	if (rq->inputsize <= PAGE_SIZE) {
		if (nrpages_out == 1 && !rq->inplace_io) {
			DBG_BUGON(!*rq->out);
			dst = kmap_atomic(*rq->out);
			dst_maptype = 0;
			goto dstmap_out;
		}

		/*
		 * For small output sizes (especially much less than
		 * PAGE_SIZE), copying the decompressed data rather than the
		 * compressed data is preferred.
		 */
		if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
			dst = erofs_get_pcpubuf(1);
			if (IS_ERR(dst))
				return PTR_ERR(dst);

			rq->inplace_io = false;
			ret = alg->decompress(rq, dst);
			if (!ret)
				copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
						  rq->outputsize);

			erofs_put_pcpubuf(dst);
			return ret;
		}
	}

	/* general decoding path which can be used for all cases */
	ret = alg->prepare_destpages(rq, pagepool);
	if (ret < 0)
		return ret;
	if (ret) {
		dst = page_address(*rq->out);
		dst_maptype = 1;
		goto dstmap_out;
	}

	dst = erofs_vm_map_ram(rq->out, nrpages_out);
	if (!dst)
		return -ENOMEM;
	dst_maptype = 2;

dstmap_out:
	ret = alg->decompress(rq, dst + rq->pageofs_out);

	if (!dst_maptype)
		kunmap_atomic(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, nrpages_out);
	return ret;
}

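/*
 * "shifted" pclusters store the data uncompressed, but the output may start
 * at a different in-page offset than the input, so a single input page is
 * split across up to two output pages.  The memmove below handles the case
 * where the second output page is the input page itself.
 */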
static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq,
				     struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
	unsigned char *src, *dst;

	if (nrpages_out > 2) {
		DBG_BUGON(1);
		return -EIO;
	}

	if (rq->out[0] == *rq->in) {
		DBG_BUGON(nrpages_out != 1);
		return 0;
	}

	src = kmap_atomic(*rq->in);
	if (rq->out[0]) {
		dst = kmap_atomic(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src, righthalf);
		kunmap_atomic(dst);
	}

	if (nrpages_out == 2) {
		DBG_BUGON(!rq->out[1]);
		if (rq->out[1] == *rq->in) {
			memmove(src, src + righthalf, rq->pageofs_out);
		} else {
			dst = kmap_atomic(rq->out[1]);
			memcpy(dst, src + righthalf, rq->pageofs_out);
			kunmap_atomic(dst);
		}
	}
	kunmap_atomic(src);
	return 0;
}

int z_erofs_decompress(struct z_erofs_decompress_req *rq,
		       struct list_head *pagepool)
{
	if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED)
		return z_erofs_shifted_transform(rq, pagepool);
	return z_erofs_decompress_generic(rq, pagepool);
}
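
/*
 * Illustrative caller sketch (a minimal, assumption-laden example, not code
 * from this file; the real caller is the z_erofs frontend).  Field names
 * follow struct z_erofs_decompress_req exactly as consumed above; the page
 * arrays, lengths, and pagepool setup are hypothetical:
 *
 *	struct z_erofs_decompress_req rq = {
 *		.sb = sb,
 *		.in = compressed_pages,		// *rq->in must not be NULL
 *		.out = decompressed_pages,	// may contain NULL holes
 *		.pageofs_out = pageofs,		// output offset in first page
 *		.inputsize = compressedlen,
 *		.outputsize = decompressedlen,
 *		.alg = Z_EROFS_COMPRESSION_LZ4,
 *		.inplace_io = false,
 *		.partial_decoding = false,
 *	};
 *	int err = z_erofs_decompress(&rq, &pagepool);
 */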