lz4_decompress.c

/*
 * LZ4 - Fast LZ compression algorithm
 * Copyright (C) 2011 - 2016, Yann Collet.
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *	* Redistributions of source code must retain the above copyright
 *	  notice, this list of conditions and the following disclaimer.
 *	* Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following disclaimer
 *	  in the documentation and/or other materials provided with the
 *	  distribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at :
 *	- LZ4 homepage : http://www.lz4.org
 *	- LZ4 source repository : https://github.com/lz4/lz4
 *
 * Changed for kernel usage by:
 *	Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
 */
/*-************************************
 *	Dependencies
 **************************************/
#include <linux/lz4.h>
#include "lz4defs.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/unaligned.h>

/*-*****************************
 *	Decompression functions
 *******************************/

#define DEBUGLOG(l, ...) {}	/* disabled */

#ifndef assert
#define assert(condition) ((void)0)
#endif
/*
 * LZ4_decompress_generic() :
 * This generic decompression function covers all use cases.
 * It shall be instantiated several times, using different sets of directives.
 * Note that it is important for performance that this function really get inlined,
 * in order to remove useless branches during compilation optimization.
 */
static FORCE_INLINE int LZ4_decompress_generic(
	const char * const src,
	char * const dst,
	int srcSize,
	/*
	 * If endOnInput == endOnInputSize,
	 * this value is `dstCapacity`
	 */
	int outputSize,
	/* endOnOutputSize, endOnInputSize */
	endCondition_directive endOnInput,
	/* full, partial */
	earlyEnd_directive partialDecoding,
	/* noDict, withPrefix64k, usingExtDict */
	dict_directive dict,
	/* always <= dst, == dst when no prefix */
	const BYTE * const lowPrefix,
	/* only if dict == usingExtDict */
	const BYTE * const dictStart,
	/* note : = 0 if noDict */
	const size_t dictSize
	)
{
	const BYTE *ip = (const BYTE *) src;
	const BYTE * const iend = ip + srcSize;

	BYTE *op = (BYTE *) dst;
	BYTE * const oend = op + outputSize;
	BYTE *cpy;

	const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize;
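	/*
	 * inc32table/dec64table drive the overlap-safe match copy used
	 * further down for offsets < 8: the first four bytes are copied
	 * one at a time, inc32table[offset] then advances the source so
	 * the next 4-byte copy continues the repeating pattern, and
	 * dec64table[offset] re-positions it for the 8-byte copies that
	 * follow.
	 */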
	static const unsigned int inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
	static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};

	const int safeDecode = (endOnInput == endOnInputSize);
	const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB)));

	/* Set up the "end" pointers for the shortcut. */
	const BYTE *const shortiend = iend -
		(endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
	const BYTE *const shortoend = oend -
		(endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;

	DEBUGLOG(5, "%s (srcSize:%i, dstSize:%i)", __func__,
		 srcSize, outputSize);

	/* Special cases */
	assert(lowPrefix <= op);
	assert(src != NULL);

	/* Empty output buffer */
	if ((endOnInput) && (unlikely(outputSize == 0)))
		return ((srcSize == 1) && (*ip == 0)) ? 0 : -1;

	if ((!endOnInput) && (unlikely(outputSize == 0)))
		return (*ip == 0 ? 1 : -1);

	if ((endOnInput) && unlikely(srcSize == 0))
		return -1;
	/* Main Loop : decode sequences */
	while (1) {
		size_t length;
		const BYTE *match;
		size_t offset;

		/* get literal length */
		unsigned int const token = *ip++;
		length = token>>ML_BITS;

		/* ip < iend before the increment */
		assert(!endOnInput || ip <= iend);

		/*
		 * A two-stage shortcut for the most common case:
		 * 1) If the literal length is 0..14, and there is enough
		 * space, enter the shortcut and copy 16 bytes on behalf
		 * of the literals (in the fast mode, only 8 bytes can be
		 * safely copied this way).
		 * 2) Further if the match length is 4..18, copy 18 bytes
		 * in a similar manner; but we ensure that there's enough
		 * space in the output for those 18 bytes earlier, upon
		 * entering the shortcut (in other words, there is a
		 * combined check for both stages).
		 *
		 * The & in the likely() below is intentionally not && so that
		 * some compilers can produce better parallelized runtime code
		 */
		if ((endOnInput ? length != RUN_MASK : length <= 8)
		   /*
		    * strictly "less than" on input, to re-enter
		    * the loop with at least one byte
		    */
		   && likely((endOnInput ? ip < shortiend : 1) &
			     (op <= shortoend))) {
			/* Copy the literals */
			LZ4_memcpy(op, ip, endOnInput ? 16 : 8);
			op += length; ip += length;

			/*
			 * The second stage:
			 * prepare for match copying, decode full info.
			 * If it doesn't work out, the info won't be wasted.
			 */
			length = token & ML_MASK; /* match length */
			offset = LZ4_readLE16(ip);
			ip += 2;
			match = op - offset;
			assert(match <= op); /* check overflow */

			/* Do not deal with overlapping matches. */
			if ((length != ML_MASK) &&
			    (offset >= 8) &&
			    (dict == withPrefix64k || match >= lowPrefix)) {
				/* Copy the match. */
				LZ4_memcpy(op + 0, match + 0, 8);
				LZ4_memcpy(op + 8, match + 8, 8);
				LZ4_memcpy(op + 16, match + 16, 2);
				op += length + MINMATCH;
				/* Both stages worked, load the next token. */
				continue;
			}

			/*
			 * The second stage didn't work out, but the info
			 * is ready. Propel it right to the point of match
			 * copying.
			 */
			goto _copy_match;
		}
		/* decode literal length */
		if (length == RUN_MASK) {
			unsigned int s;

			if (unlikely(endOnInput ? ip >= iend - RUN_MASK : 0)) {
				/* overflow detection */
				goto _output_error;
			}
			do {
				s = *ip++;
				length += s;
			} while (likely(endOnInput
				? ip < iend - RUN_MASK
				: 1) & (s == 255));

			if ((safeDecode)
			    && unlikely((uptrval)(op) +
					length < (uptrval)(op))) {
				/* overflow detection */
				goto _output_error;
			}
			if ((safeDecode)
			    && unlikely((uptrval)(ip) +
					length < (uptrval)(ip))) {
				/* overflow detection */
				goto _output_error;
			}
		}
		/* copy literals */
		cpy = op + length;
		LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);

		if (((endOnInput) && ((cpy > oend - MFLIMIT)
			|| (ip + length > iend - (2 + 1 + LASTLITERALS))))
			|| ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) {
			if (partialDecoding) {
				if (cpy > oend) {
					/*
					 * Partial decoding :
					 * stop in the middle of literal segment
					 */
					cpy = oend;
					length = oend - op;
				}
				if ((endOnInput)
					&& (ip + length > iend)) {
					/*
					 * Error :
					 * read attempt beyond
					 * end of input buffer
					 */
					goto _output_error;
				}
			} else {
				if ((!endOnInput)
					&& (cpy != oend)) {
					/*
					 * Error :
					 * block decoding must
					 * stop exactly there
					 */
					goto _output_error;
				}
				if ((endOnInput)
					&& ((ip + length != iend)
					|| (cpy > oend))) {
					/*
					 * Error :
					 * input must be consumed
					 */
					goto _output_error;
				}
			}

			/*
			 * supports overlapping memory regions; only matters
			 * for in-place decompression scenarios
			 */
			LZ4_memmove(op, ip, length);
			ip += length;
			op += length;

			/* Necessarily EOF when !partialDecoding.
			 * When partialDecoding, it is EOF if we've either
			 * filled the output buffer or
			 * can't proceed with reading an offset for following match.
			 */
			if (!partialDecoding || (cpy == oend) || (ip >= (iend - 2)))
				break;
		} else {
			/* may overwrite up to WILDCOPYLENGTH beyond cpy */
			LZ4_wildCopy(op, ip, cpy);
			ip += length;
			op = cpy;
		}
		/* get offset */
		offset = LZ4_readLE16(ip);
		ip += 2;
		match = op - offset;

		/* get matchlength */
		length = token & ML_MASK;

_copy_match:
		if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) {
			/* Error : offset outside buffers */
			goto _output_error;
		}

		/* costs ~1%; silence an msan warning when offset == 0 */
		/*
		 * note : when partialDecoding, there is no guarantee that
		 * at least 4 bytes remain available in output buffer
		 */
		if (!partialDecoding) {
			assert(oend > op);
			assert(oend - op >= 4);

			LZ4_write32(op, (U32)offset);
		}

		if (length == ML_MASK) {
			unsigned int s;

			do {
				s = *ip++;

				if ((endOnInput) && (ip > iend - LASTLITERALS))
					goto _output_error;

				length += s;
			} while (s == 255);

			if ((safeDecode)
			    && unlikely(
				(uptrval)(op) + length < (uptrval)op)) {
				/* overflow detection */
				goto _output_error;
			}
		}
		length += MINMATCH;

		/* match starting within external dictionary */
		if ((dict == usingExtDict) && (match < lowPrefix)) {
			if (unlikely(op + length > oend - LASTLITERALS)) {
				/* doesn't respect parsing restriction */
				if (!partialDecoding)
					goto _output_error;
				length = min(length, (size_t)(oend - op));
			}

			if (length <= (size_t)(lowPrefix - match)) {
				/*
				 * match fits entirely within external
				 * dictionary : just copy
				 */
				memmove(op, dictEnd - (lowPrefix - match),
					length);
				op += length;
			} else {
				/*
				 * match stretches into both external
				 * dictionary and current block
				 */
				size_t const copySize = (size_t)(lowPrefix - match);
				size_t const restSize = length - copySize;

				LZ4_memcpy(op, dictEnd - copySize, copySize);
				op += copySize;
				if (restSize > (size_t)(op - lowPrefix)) {
					/* overlap copy */
					BYTE * const endOfMatch = op + restSize;
					const BYTE *copyFrom = lowPrefix;

					while (op < endOfMatch)
						*op++ = *copyFrom++;
				} else {
					LZ4_memcpy(op, lowPrefix, restSize);
					op += restSize;
				}
			}
			continue;
		}
		/* copy match within block */
		cpy = op + length;

		/*
		 * partialDecoding :
		 * may not respect endBlock parsing restrictions
		 */
		assert(op <= oend);
		if (partialDecoding &&
		    (cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
			size_t const mlen = min(length, (size_t)(oend - op));
			const BYTE * const matchEnd = match + mlen;
			BYTE * const copyEnd = op + mlen;

			if (matchEnd > op) {
				/* overlap copy */
				while (op < copyEnd)
					*op++ = *match++;
			} else {
				LZ4_memcpy(op, match, mlen);
			}
			op = copyEnd;
			if (op == oend)
				break;
			continue;
		}
		if (unlikely(offset < 8)) {
			op[0] = match[0];
			op[1] = match[1];
			op[2] = match[2];
			op[3] = match[3];
			match += inc32table[offset];
			LZ4_memcpy(op + 4, match, 4);
			match -= dec64table[offset];
		} else {
			LZ4_copy8(op, match);
			match += 8;
		}

		op += 8;

		if (unlikely(cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
			BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1);

			if (cpy > oend - LASTLITERALS) {
				/*
				 * Error : last LASTLITERALS bytes
				 * must be literals (uncompressed)
				 */
				goto _output_error;
			}

			if (op < oCopyLimit) {
				LZ4_wildCopy(op, match, oCopyLimit);
				match += oCopyLimit - op;
				op = oCopyLimit;
			}
			while (op < cpy)
				*op++ = *match++;
		} else {
			LZ4_copy8(op, match);
			if (length > 16)
				LZ4_wildCopy(op + 8, match + 8, cpy);
		}
		op = cpy; /* wildcopy correction */
	}
	/* end of decoding */
	if (endOnInput) {
		/* Nb of output bytes decoded */
		return (int) (((char *)op) - dst);
	} else {
		/* Nb of input bytes read */
		return (int) (((const char *)ip) - src);
	}

	/* Overflow error detected */
_output_error:
	return (int) (-(((const char *)ip) - src)) - 1;
}
int LZ4_decompress_safe(const char *source, char *dest,
	int compressedSize, int maxDecompressedSize)
{
	return LZ4_decompress_generic(source, dest,
				      compressedSize, maxDecompressedSize,
				      endOnInputSize, decode_full_block,
				      noDict, (BYTE *)dest, NULL, 0);
}

int LZ4_decompress_safe_partial(const char *src, char *dst,
	int compressedSize, int targetOutputSize, int dstCapacity)
{
	dstCapacity = min(targetOutputSize, dstCapacity);
	return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
				      endOnInputSize, partial_decode,
				      noDict, (BYTE *)dst, NULL, 0);
}
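/*
 * Note: the _fast variants below decode with endOnOutputSize, i.e. the
 * compressed input is read without bounds checking, so they should only
 * be used on trusted data whose exact decompressed size is known.
 */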
int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
{
	return LZ4_decompress_generic(source, dest, 0, originalSize,
				      endOnOutputSize, decode_full_block,
				      withPrefix64k,
				      (BYTE *)dest - 64 * KB, NULL, 0);
}
/* ===== Instantiate a few more decoding cases, used more than once. ===== */

int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
				      int compressedSize, int maxOutputSize)
{
	return LZ4_decompress_generic(source, dest,
				      compressedSize, maxOutputSize,
				      endOnInputSize, decode_full_block,
				      withPrefix64k,
				      (BYTE *)dest - 64 * KB, NULL, 0);
}

static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest,
					       int compressedSize,
					       int maxOutputSize,
					       size_t prefixSize)
{
	return LZ4_decompress_generic(source, dest,
				      compressedSize, maxOutputSize,
				      endOnInputSize, decode_full_block,
				      noDict,
				      (BYTE *)dest - prefixSize, NULL, 0);
}

int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
				     int compressedSize, int maxOutputSize,
				     const void *dictStart, size_t dictSize)
{
	return LZ4_decompress_generic(source, dest,
				      compressedSize, maxOutputSize,
				      endOnInputSize, decode_full_block,
				      usingExtDict, (BYTE *)dest,
				      (const BYTE *)dictStart, dictSize);
}

static int LZ4_decompress_fast_extDict(const char *source, char *dest,
					int originalSize,
					const void *dictStart, size_t dictSize)
{
	return LZ4_decompress_generic(source, dest,
				      0, originalSize,
				      endOnOutputSize, decode_full_block,
				      usingExtDict, (BYTE *)dest,
				      (const BYTE *)dictStart, dictSize);
}
/*
 * The "double dictionary" mode, for use with e.g. ring buffers: the first part
 * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
 * These routines are used only once, in LZ4_decompress_*_continue().
 */
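/*
 * Rough layout sketch for this mode (illustrative only, not to scale):
 *
 *	[ dictStart .. dictStart + dictSize ]  ...  [ prefix ][ dest ... ]
 *	       external dictionary                  ^lowPrefix = dest - prefixSize
 *
 * Matches reaching below lowPrefix are resolved against the external
 * dictionary segment; everything at or above it comes from the prefix
 * and the block currently being decoded.
 */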
static FORCE_INLINE
int LZ4_decompress_safe_doubleDict(const char *source, char *dest,
				   int compressedSize, int maxOutputSize,
				   size_t prefixSize,
				   const void *dictStart, size_t dictSize)
{
	return LZ4_decompress_generic(source, dest,
				      compressedSize, maxOutputSize,
				      endOnInputSize, decode_full_block,
				      usingExtDict, (BYTE *)dest - prefixSize,
				      (const BYTE *)dictStart, dictSize);
}

static FORCE_INLINE
int LZ4_decompress_fast_doubleDict(const char *source, char *dest,
				   int originalSize, size_t prefixSize,
				   const void *dictStart, size_t dictSize)
{
	return LZ4_decompress_generic(source, dest,
				      0, originalSize,
				      endOnOutputSize, decode_full_block,
				      usingExtDict, (BYTE *)dest - prefixSize,
				      (const BYTE *)dictStart, dictSize);
}

/* ===== streaming decompression functions ===== */

int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
			const char *dictionary, int dictSize)
{
	LZ4_streamDecode_t_internal *lz4sd =
		&LZ4_streamDecode->internal_donotuse;

	lz4sd->prefixSize = (size_t) dictSize;
	lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize;
	lz4sd->externalDict = NULL;
	lz4sd->extDictSize = 0;
	return 1;
}
/*
 * *_continue() :
 * These decoding functions allow decompression of multiple blocks
 * in "streaming" mode.
 * Previously decoded blocks must still be available at the memory
 * position where they were decoded.
 * If it's not possible, save the relevant part of
 * decoded data into a safe buffer,
 * and indicate where it stands using LZ4_setStreamDecode()
 */
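/*
 * Minimal usage sketch: decode a series of dependent blocks into one
 * contiguous buffer. The caller is assumed to track each block's
 * compressed size itself; names such as src[i], src_len[i], dst_buf and
 * dst_cap are placeholders.
 *
 *	LZ4_streamDecode_t stream;
 *	char *out = dst_buf;
 *	int i, n;
 *
 *	LZ4_setStreamDecode(&stream, NULL, 0);
 *	for (i = 0; i < nr_blocks; i++) {
 *		n = LZ4_decompress_safe_continue(&stream, src[i], out,
 *				src_len[i], dst_buf + dst_cap - out);
 *		if (n < 0)
 *			return n;
 *		out += n;
 *	}
 */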
int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
	const char *source, char *dest, int compressedSize, int maxOutputSize)
{
	LZ4_streamDecode_t_internal *lz4sd =
		&LZ4_streamDecode->internal_donotuse;
	int result;

	if (lz4sd->prefixSize == 0) {
		/* The first call, no dictionary yet. */
		assert(lz4sd->extDictSize == 0);
		result = LZ4_decompress_safe(source, dest,
			compressedSize, maxOutputSize);
		if (result <= 0)
			return result;
		lz4sd->prefixSize = result;
		lz4sd->prefixEnd = (BYTE *)dest + result;
	} else if (lz4sd->prefixEnd == (BYTE *)dest) {
		/* They're rolling the current segment. */
		if (lz4sd->prefixSize >= 64 * KB - 1)
			result = LZ4_decompress_safe_withPrefix64k(source,
				dest, compressedSize, maxOutputSize);
		else if (lz4sd->extDictSize == 0)
			result = LZ4_decompress_safe_withSmallPrefix(source,
				dest, compressedSize, maxOutputSize,
				lz4sd->prefixSize);
		else
			result = LZ4_decompress_safe_doubleDict(source, dest,
				compressedSize, maxOutputSize,
				lz4sd->prefixSize,
				lz4sd->externalDict, lz4sd->extDictSize);
		if (result <= 0)
			return result;
		lz4sd->prefixSize += result;
		lz4sd->prefixEnd += result;
	} else {
		/*
		 * The buffer wraps around, or they're
		 * switching to another buffer.
		 */
		lz4sd->extDictSize = lz4sd->prefixSize;
		lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
		result = LZ4_decompress_safe_forceExtDict(source, dest,
			compressedSize, maxOutputSize,
			lz4sd->externalDict, lz4sd->extDictSize);
		if (result <= 0)
			return result;
		lz4sd->prefixSize = result;
		lz4sd->prefixEnd = (BYTE *)dest + result;
	}

	return result;
}
int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
	const char *source, char *dest, int originalSize)
{
	LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
	int result;

	if (lz4sd->prefixSize == 0) {
		assert(lz4sd->extDictSize == 0);
		result = LZ4_decompress_fast(source, dest, originalSize);
		if (result <= 0)
			return result;
		lz4sd->prefixSize = originalSize;
		lz4sd->prefixEnd = (BYTE *)dest + originalSize;
	} else if (lz4sd->prefixEnd == (BYTE *)dest) {
		if (lz4sd->prefixSize >= 64 * KB - 1 ||
		    lz4sd->extDictSize == 0)
			result = LZ4_decompress_fast(source, dest,
				originalSize);
		else
			result = LZ4_decompress_fast_doubleDict(source, dest,
				originalSize, lz4sd->prefixSize,
				lz4sd->externalDict, lz4sd->extDictSize);
		if (result <= 0)
			return result;
		lz4sd->prefixSize += originalSize;
		lz4sd->prefixEnd += originalSize;
	} else {
		lz4sd->extDictSize = lz4sd->prefixSize;
		lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
		result = LZ4_decompress_fast_extDict(source, dest,
			originalSize, lz4sd->externalDict, lz4sd->extDictSize);
		if (result <= 0)
			return result;
		lz4sd->prefixSize = originalSize;
		lz4sd->prefixEnd = (BYTE *)dest + originalSize;
	}

	return result;
}
int LZ4_decompress_safe_usingDict(const char *source, char *dest,
				  int compressedSize, int maxOutputSize,
				  const char *dictStart, int dictSize)
{
	if (dictSize == 0)
		return LZ4_decompress_safe(source, dest,
					   compressedSize, maxOutputSize);
	if (dictStart + dictSize == dest) {
		if (dictSize >= 64 * KB - 1)
			return LZ4_decompress_safe_withPrefix64k(source, dest,
				compressedSize, maxOutputSize);
		return LZ4_decompress_safe_withSmallPrefix(source, dest,
			compressedSize, maxOutputSize, dictSize);
	}
	return LZ4_decompress_safe_forceExtDict(source, dest,
		compressedSize, maxOutputSize, dictStart, dictSize);
}

int LZ4_decompress_fast_usingDict(const char *source, char *dest,
				  int originalSize,
				  const char *dictStart, int dictSize)
{
	if (dictSize == 0 || dictStart + dictSize == dest)
		return LZ4_decompress_fast(source, dest, originalSize);

	return LZ4_decompress_fast_extDict(source, dest, originalSize,
					   dictStart, dictSize);
}
#ifndef STATIC
EXPORT_SYMBOL(LZ4_decompress_safe);
EXPORT_SYMBOL(LZ4_decompress_safe_partial);
EXPORT_SYMBOL(LZ4_decompress_fast);
EXPORT_SYMBOL(LZ4_setStreamDecode);
EXPORT_SYMBOL(LZ4_decompress_safe_continue);
EXPORT_SYMBOL(LZ4_decompress_fast_continue);
EXPORT_SYMBOL(LZ4_decompress_safe_usingDict);
EXPORT_SYMBOL(LZ4_decompress_fast_usingDict);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 decompressor");
#endif