lz4hc_compress.c

/*
 * LZ4 HC - High Compression Mode of LZ4
 * Copyright (C) 2011-2015, Yann Collet.
 *
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *	* Redistributions of source code must retain the above copyright
 *	  notice, this list of conditions and the following disclaimer.
 *	* Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following disclaimer
 *	  in the documentation and/or other materials provided with the
 *	  distribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at :
 *	- LZ4 homepage : http://www.lz4.org
 *	- LZ4 source repository : https://github.com/lz4/lz4
 *
 * Changed for kernel usage by:
 *	Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
 */
/*-************************************
 *	Dependencies
 **************************************/
#include <linux/lz4.h>
#include "lz4defs.h"
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h> /* memset */
/* *************************************
 *	Local Constants and types
 ***************************************/

#define OPTIMAL_ML (int)((ML_MASK - 1) + MINMATCH)

#define HASH_FUNCTION(i)	(((i) * 2654435761U) \
	>> ((MINMATCH*8) - LZ4HC_HASH_LOG))
#define DELTANEXTU16(p)	chainTable[(U16)(p)]	/* faster */
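
/*
 * LZ4HC_hashPtr() - multiplicative (Knuth-style) hash of the first
 * MINMATCH (4) bytes at @ptr, keeping the top LZ4HC_HASH_LOG bits.
 * Positions that hash to the same bucket are linked through chainTable
 * via 16-bit backward deltas (DELTANEXTU16), which caps the reachable
 * history at 64 KB.
 */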
static U32 LZ4HC_hashPtr(const void *ptr)
{
	return HASH_FUNCTION(LZ4_read32(ptr));
}
/**************************************
 *	HC Compression
 **************************************/
static void LZ4HC_init(LZ4HC_CCtx_internal *hc4, const BYTE *start)
{
	memset((void *)hc4->hashTable, 0, sizeof(hc4->hashTable));
	memset(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
	hc4->nextToUpdate = 64 * KB;
	hc4->base = start - 64 * KB;
	hc4->end = start;
	hc4->dictBase = start - 64 * KB;
	hc4->dictLimit = 64 * KB;
	hc4->lowLimit = 64 * KB;
}
/* Update chains up to ip (excluded) */
static FORCE_INLINE void LZ4HC_Insert(LZ4HC_CCtx_internal *hc4,
	const BYTE *ip)
{
	U16 * const chainTable = hc4->chainTable;
	U32 * const hashTable = hc4->hashTable;
	const BYTE * const base = hc4->base;
	U32 const target = (U32)(ip - base);
	U32 idx = hc4->nextToUpdate;

	while (idx < target) {
		U32 const h = LZ4HC_hashPtr(base + idx);
		size_t delta = idx - hashTable[h];

		if (delta > MAX_DISTANCE)
			delta = MAX_DISTANCE;

		DELTANEXTU16(idx) = (U16)delta;

		hashTable[h] = idx;
		idx++;
	}

	hc4->nextToUpdate = target;
}
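
/*
 * LZ4HC_InsertAndFindBestMatch() - insert positions up to @ip into the
 * tables, then walk the hash chain (at most @maxNbAttempts links) looking
 * for the longest match for @ip within the 64 KB window.  Candidates below
 * dictLimit live in the external dictionary segment and are length-limited
 * at the segment boundary.  Returns the best match length (0 if none) and
 * stores its position in *matchpos.
 */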
static FORCE_INLINE int LZ4HC_InsertAndFindBestMatch(
	LZ4HC_CCtx_internal *hc4,	/* Index table will be updated */
	const BYTE *ip,
	const BYTE * const iLimit,
	const BYTE **matchpos,
	const int maxNbAttempts)
{
	U16 * const chainTable = hc4->chainTable;
	U32 * const HashTable = hc4->hashTable;
	const BYTE * const base = hc4->base;
	const BYTE * const dictBase = hc4->dictBase;
	const U32 dictLimit = hc4->dictLimit;
	const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base))
		? hc4->lowLimit
		: (U32)(ip - base) - (64 * KB - 1);
	U32 matchIndex;
	int nbAttempts = maxNbAttempts;
	size_t ml = 0;

	/* HC4 match finder */
	LZ4HC_Insert(hc4, ip);
	matchIndex = HashTable[LZ4HC_hashPtr(ip)];

	while ((matchIndex >= lowLimit)
		&& (nbAttempts)) {
		nbAttempts--;
		if (matchIndex >= dictLimit) {
			const BYTE * const match = base + matchIndex;

			if (*(match + ml) == *(ip + ml)
				&& (LZ4_read32(match) == LZ4_read32(ip))) {
				size_t const mlt = LZ4_count(ip + MINMATCH,
					match + MINMATCH, iLimit) + MINMATCH;

				if (mlt > ml) {
					ml = mlt;
					*matchpos = match;
				}
			}
		} else {
			const BYTE * const match = dictBase + matchIndex;

			if (LZ4_read32(match) == LZ4_read32(ip)) {
				size_t mlt;
				const BYTE *vLimit = ip
					+ (dictLimit - matchIndex);

				if (vLimit > iLimit)
					vLimit = iLimit;
				mlt = LZ4_count(ip + MINMATCH,
					match + MINMATCH, vLimit) + MINMATCH;
				if ((ip + mlt == vLimit)
					&& (vLimit < iLimit))
					mlt += LZ4_count(ip + mlt,
						base + dictLimit,
						iLimit);
				if (mlt > ml) {
					/* virtual matchpos */
					ml = mlt;
					*matchpos = base + matchIndex;
				}
			}
		}
		matchIndex -= DELTANEXTU16(matchIndex);
	}

	return (int)ml;
}
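
/*
 * LZ4HC_InsertAndGetWiderMatch() - like the best-match search above, but a
 * candidate may also be extended backwards, as far as @iLowLimit and the
 * prefix/dictionary start allow.  Only matches strictly longer than
 * @longest are reported; the (possibly earlier) start of the match is
 * returned through *startpos and its source through *matchpos.
 */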
static FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch(
	LZ4HC_CCtx_internal *hc4,
	const BYTE * const ip,
	const BYTE * const iLowLimit,
	const BYTE * const iHighLimit,
	int longest,
	const BYTE **matchpos,
	const BYTE **startpos,
	const int maxNbAttempts)
{
	U16 * const chainTable = hc4->chainTable;
	U32 * const HashTable = hc4->hashTable;
	const BYTE * const base = hc4->base;
	const U32 dictLimit = hc4->dictLimit;
	const BYTE * const lowPrefixPtr = base + dictLimit;
	const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base))
		? hc4->lowLimit
		: (U32)(ip - base) - (64 * KB - 1);
	const BYTE * const dictBase = hc4->dictBase;
	U32 matchIndex;
	int nbAttempts = maxNbAttempts;
	int delta = (int)(ip - iLowLimit);

	/* First Match */
	LZ4HC_Insert(hc4, ip);
	matchIndex = HashTable[LZ4HC_hashPtr(ip)];

	while ((matchIndex >= lowLimit)
		&& (nbAttempts)) {
		nbAttempts--;
		if (matchIndex >= dictLimit) {
			const BYTE *matchPtr = base + matchIndex;

			if (*(iLowLimit + longest)
				== *(matchPtr - delta + longest)) {
				if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
					int mlt = MINMATCH + LZ4_count(
						ip + MINMATCH,
						matchPtr + MINMATCH,
						iHighLimit);
					int back = 0;

					while ((ip + back > iLowLimit)
						&& (matchPtr + back > lowPrefixPtr)
						&& (ip[back - 1] == matchPtr[back - 1]))
						back--;

					mlt -= back;

					if (mlt > longest) {
						longest = (int)mlt;
						*matchpos = matchPtr + back;
						*startpos = ip + back;
					}
				}
			}
		} else {
			const BYTE * const matchPtr = dictBase + matchIndex;

			if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
				size_t mlt;
				int back = 0;
				const BYTE *vLimit = ip + (dictLimit - matchIndex);

				if (vLimit > iHighLimit)
					vLimit = iHighLimit;

				mlt = LZ4_count(ip + MINMATCH,
					matchPtr + MINMATCH, vLimit) + MINMATCH;

				if ((ip + mlt == vLimit) && (vLimit < iHighLimit))
					mlt += LZ4_count(ip + mlt, base + dictLimit,
						iHighLimit);

				while ((ip + back > iLowLimit)
					&& (matchIndex + back > lowLimit)
					&& (ip[back - 1] == matchPtr[back - 1]))
					back--;

				mlt -= back;

				if ((int)mlt > longest) {
					longest = (int)mlt;
					*matchpos = base + matchIndex + back;
					*startpos = ip + back;
				}
			}
		}

		matchIndex -= DELTANEXTU16(matchIndex);
	}

	return longest;
}
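
/*
 * LZ4HC_encodeSequence() - emit one LZ4 sequence: a token holding the
 * literal-length and match-length nibbles, optional extra length bytes
 * (255 per step), the literals themselves, a 2-byte little-endian offset,
 * then optional extra match-length bytes.  Returns 1 when the output
 * limit would be exceeded (limitedOutput), 0 on success, and advances
 * *ip, *op and *anchor.
 */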
static FORCE_INLINE int LZ4HC_encodeSequence(
	const BYTE **ip,
	BYTE **op,
	const BYTE **anchor,
	int matchLength,
	const BYTE * const match,
	limitedOutput_directive limitedOutputBuffer,
	BYTE *oend)
{
	int length;
	BYTE *token;

	/* Encode Literal length */
	length = (int)(*ip - *anchor);
	token = (*op)++;

	if ((limitedOutputBuffer)
		&& ((*op + (length >> 8)
			+ length + (2 + 1 + LASTLITERALS)) > oend)) {
		/* Check output limit */
		return 1;
	}
	if (length >= (int)RUN_MASK) {
		int len;

		*token = (RUN_MASK << ML_BITS);
		len = length - RUN_MASK;
		for (; len > 254 ; len -= 255)
			*(*op)++ = 255;
		*(*op)++ = (BYTE)len;
	} else
		*token = (BYTE)(length << ML_BITS);

	/* Copy Literals */
	LZ4_wildCopy(*op, *anchor, (*op) + length);
	*op += length;

	/* Encode Offset */
	LZ4_writeLE16(*op, (U16)(*ip - match));
	*op += 2;

	/* Encode MatchLength */
	length = (int)(matchLength - MINMATCH);

	if ((limitedOutputBuffer)
		&& (*op + (length >> 8)
			+ (1 + LASTLITERALS) > oend)) {
		/* Check output limit */
		return 1;
	}

	if (length >= (int)ML_MASK) {
		*token += ML_MASK;
		length -= ML_MASK;

		for (; length > 509 ; length -= 510) {
			*(*op)++ = 255;
			*(*op)++ = 255;
		}

		if (length > 254) {
			length -= 255;
			*(*op)++ = 255;
		}

		*(*op)++ = (BYTE)length;
	} else
		*token += (BYTE)(length);

	/* Prepare next loop */
	*ip += matchLength;
	*anchor = *ip;

	return 0;
}
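
/*
 * LZ4HC_compress_generic() - main HC parser.  The compression level only
 * sets the search depth (maxNbAttempts = 1 << (level - 1)).  For each
 * position the parser considers up to three overlapping candidate matches
 * (_Search2/_Search3) and trims or drops them so the emitted sequences do
 * not overlap, which is what buys the extra ratio over plain LZ4.
 */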
static int LZ4HC_compress_generic(
	LZ4HC_CCtx_internal *const ctx,
	const char * const source,
	char * const dest,
	int const inputSize,
	int const maxOutputSize,
	int compressionLevel,
	limitedOutput_directive limit
	)
{
	const BYTE *ip = (const BYTE *) source;
	const BYTE *anchor = ip;
	const BYTE * const iend = ip + inputSize;
	const BYTE * const mflimit = iend - MFLIMIT;
	const BYTE * const matchlimit = (iend - LASTLITERALS);
	BYTE *op = (BYTE *) dest;
	BYTE * const oend = op + maxOutputSize;

	unsigned int maxNbAttempts;
	int ml, ml2, ml3, ml0;
	const BYTE *ref = NULL;
	const BYTE *start2 = NULL;
	const BYTE *ref2 = NULL;
	const BYTE *start3 = NULL;
	const BYTE *ref3 = NULL;
	const BYTE *start0;
	const BYTE *ref0;

	/* init */
	if (compressionLevel > LZ4HC_MAX_CLEVEL)
		compressionLevel = LZ4HC_MAX_CLEVEL;
	if (compressionLevel < 1)
		compressionLevel = LZ4HC_DEFAULT_CLEVEL;
	maxNbAttempts = 1 << (compressionLevel - 1);
	ctx->end += inputSize;

	ip++;

	/* Main Loop */
	while (ip < mflimit) {
		ml = LZ4HC_InsertAndFindBestMatch(ctx, ip,
			matchlimit, (&ref), maxNbAttempts);
		if (!ml) {
			ip++;
			continue;
		}

		/* saved, in case we would skip too much */
		start0 = ip;
		ref0 = ref;
		ml0 = ml;

_Search2:
		if (ip + ml < mflimit)
			ml2 = LZ4HC_InsertAndGetWiderMatch(ctx,
				ip + ml - 2, ip + 0,
				matchlimit, ml, &ref2,
				&start2, maxNbAttempts);
		else
			ml2 = ml;

		if (ml2 == ml) {
			/* No better match */
			if (LZ4HC_encodeSequence(&ip, &op,
				&anchor, ml, ref, limit, oend))
				return 0;
			continue;
		}

		if (start0 < ip) {
			if (start2 < ip + ml0) {
				/* empirical */
				ip = start0;
				ref = ref0;
				ml = ml0;
			}
		}

		/* Here, start0 == ip */
		if ((start2 - ip) < 3) {
			/* First Match too small : removed */
			ml = ml2;
			ip = start2;
			ref = ref2;
			goto _Search2;
		}

_Search3:
		/*
		 * Currently we have :
		 * ml2 > ml1, and
		 * ip1 + 3 <= ip2 (usually < ip1 + ml1)
		 */
		if ((start2 - ip) < OPTIMAL_ML) {
			int correction;
			int new_ml = ml;

			if (new_ml > OPTIMAL_ML)
				new_ml = OPTIMAL_ML;
			if (ip + new_ml > start2 + ml2 - MINMATCH)
				new_ml = (int)(start2 - ip) + ml2 - MINMATCH;

			correction = new_ml - (int)(start2 - ip);

			if (correction > 0) {
				start2 += correction;
				ref2 += correction;
				ml2 -= correction;
			}
		}

		/*
		 * Now, we have start2 = ip + new_ml,
		 * with new_ml = min(ml, OPTIMAL_ML = 18)
		 */

		if (start2 + ml2 < mflimit)
			ml3 = LZ4HC_InsertAndGetWiderMatch(ctx,
				start2 + ml2 - 3, start2,
				matchlimit, ml2, &ref3, &start3,
				maxNbAttempts);
		else
			ml3 = ml2;

		if (ml3 == ml2) {
			/* No better match : 2 sequences to encode */
			/* ip & ref are known; Now for ml */
			if (start2 < ip + ml)
				ml = (int)(start2 - ip);

			/* Now, encode 2 sequences */
			if (LZ4HC_encodeSequence(&ip, &op, &anchor,
				ml, ref, limit, oend))
				return 0;
			ip = start2;
			if (LZ4HC_encodeSequence(&ip, &op, &anchor,
				ml2, ref2, limit, oend))
				return 0;
			continue;
		}

		if (start3 < ip + ml + 3) {
			/* Not enough space for match 2 : remove it */
			if (start3 >= (ip + ml)) {
				/* can write Seq1 immediately
				 * ==> Seq2 is removed,
				 * so Seq3 becomes Seq1
				 */
				if (start2 < ip + ml) {
					int correction = (int)(ip + ml - start2);

					start2 += correction;
					ref2 += correction;
					ml2 -= correction;
					if (ml2 < MINMATCH) {
						start2 = start3;
						ref2 = ref3;
						ml2 = ml3;
					}
				}

				if (LZ4HC_encodeSequence(&ip, &op, &anchor,
					ml, ref, limit, oend))
					return 0;
				ip = start3;
				ref = ref3;
				ml = ml3;

				start0 = start2;
				ref0 = ref2;
				ml0 = ml2;
				goto _Search2;
			}

			start2 = start3;
			ref2 = ref3;
			ml2 = ml3;
			goto _Search3;
		}

		/*
		 * OK, now we have 3 ascending matches;
		 * let's write at least the first one
		 * ip & ref are known; Now for ml
		 */
		if (start2 < ip + ml) {
			if ((start2 - ip) < (int)ML_MASK) {
				int correction;

				if (ml > OPTIMAL_ML)
					ml = OPTIMAL_ML;
				if (ip + ml > start2 + ml2 - MINMATCH)
					ml = (int)(start2 - ip) + ml2 - MINMATCH;
				correction = ml - (int)(start2 - ip);
				if (correction > 0) {
					start2 += correction;
					ref2 += correction;
					ml2 -= correction;
				}
			} else
				ml = (int)(start2 - ip);
		}
		if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml,
			ref, limit, oend))
			return 0;

		ip = start2;
		ref = ref2;
		ml = ml2;

		start2 = start3;
		ref2 = ref3;
		ml2 = ml3;

		goto _Search3;
	}

	/* Encode Last Literals */
	{
		int lastRun = (int)(iend - anchor);

		if ((limit)
			&& (((char *)op - dest) + lastRun + 1
				+ ((lastRun + 255 - RUN_MASK)/255)
					> (U32)maxOutputSize)) {
			/* Check output limit */
			return 0;
		}
		if (lastRun >= (int)RUN_MASK) {
			*op++ = (RUN_MASK << ML_BITS);
			lastRun -= RUN_MASK;
			for (; lastRun > 254 ; lastRun -= 255)
				*op++ = 255;
			*op++ = (BYTE) lastRun;
		} else
			*op++ = (BYTE)(lastRun << ML_BITS);
		LZ4_memcpy(op, anchor, iend - anchor);
		op += iend - anchor;
	}

	/* End */
	return (int) (((char *)op) - dest);
}
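
/*
 * LZ4_compress_HC_extStateHC() - one-shot compression using a
 * caller-provided workspace.  @state must be pointer-aligned (checked
 * below, 0 is returned otherwise) and large enough to hold an
 * LZ4_streamHC_t.
 */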
static int LZ4_compress_HC_extStateHC(
	void *state,
	const char *src,
	char *dst,
	int srcSize,
	int maxDstSize,
	int compressionLevel)
{
	LZ4HC_CCtx_internal *ctx = &((LZ4_streamHC_t *)state)->internal_donotuse;

	if (((size_t)(state)&(sizeof(void *) - 1)) != 0) {
		/* Error : state is not aligned
		 * for pointers (32 or 64 bits)
		 */
		return 0;
	}

	LZ4HC_init(ctx, (const BYTE *)src);

	if (maxDstSize < LZ4_compressBound(srcSize))
		return LZ4HC_compress_generic(ctx, src, dst,
			srcSize, maxDstSize, compressionLevel, limitedOutput);
	else
		return LZ4HC_compress_generic(ctx, src, dst,
			srcSize, maxDstSize, compressionLevel, noLimit);
}
int LZ4_compress_HC(const char *src, char *dst, int srcSize,
	int maxDstSize, int compressionLevel, void *wrkmem)
{
	return LZ4_compress_HC_extStateHC(wrkmem, src, dst,
		srcSize, maxDstSize, compressionLevel);
}
EXPORT_SYMBOL(LZ4_compress_HC);
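
/*
 * Typical one-shot use from other kernel code might look like the sketch
 * below (illustrative only; the buffer names and the kvmalloc()-based
 * error handling are assumptions, not part of this file):
 *
 *	void *wrkmem = kvmalloc(LZ4HC_MEM_COMPRESS, GFP_KERNEL);
 *	int csize;
 *
 *	if (!wrkmem)
 *		return -ENOMEM;
 *	csize = LZ4_compress_HC(src, dst, srcSize,
 *			LZ4_compressBound(srcSize),
 *			LZ4HC_DEFAULT_CLEVEL, wrkmem);
 *	kvfree(wrkmem);
 *	if (!csize)
 *		return -EINVAL;	// a return value of 0 means failure
 *
 * LZ4_compressBound(), LZ4HC_MEM_COMPRESS and the compression-level
 * constants come from the LZ4 headers included above.
 */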
/**************************************
 *	Streaming Functions
 **************************************/
void LZ4_resetStreamHC(LZ4_streamHC_t *LZ4_streamHCPtr, int compressionLevel)
{
	LZ4_streamHCPtr->internal_donotuse.base = NULL;
	LZ4_streamHCPtr->internal_donotuse.compressionLevel = (unsigned int)compressionLevel;
}
int LZ4_loadDictHC(LZ4_streamHC_t *LZ4_streamHCPtr,
	const char *dictionary,
	int dictSize)
{
	LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse;

	if (dictSize > 64 * KB) {
		dictionary += dictSize - 64 * KB;
		dictSize = 64 * KB;
	}
	LZ4HC_init(ctxPtr, (const BYTE *)dictionary);
	if (dictSize >= 4)
		LZ4HC_Insert(ctxPtr, (const BYTE *)dictionary + (dictSize - 3));
	ctxPtr->end = (const BYTE *)dictionary + dictSize;
	return dictSize;
}
EXPORT_SYMBOL(LZ4_loadDictHC);
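
/*
 * LZ4HC_setExternalDict() - called when a new block does not immediately
 * follow the previous one in memory: the old prefix becomes the single
 * extDict segment (any earlier extDict is dropped) and the indexing is
 * rebased so that @newBlock starts right after it.
 */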
/* compression */
static void LZ4HC_setExternalDict(
	LZ4HC_CCtx_internal *ctxPtr,
	const BYTE *newBlock)
{
	if (ctxPtr->end >= ctxPtr->base + 4) {
		/* Referencing remaining dictionary content */
		LZ4HC_Insert(ctxPtr, ctxPtr->end - 3);
	}

	/*
	 * Only one memory segment for extDict,
	 * so any previous extDict is lost at this stage
	 */
	ctxPtr->lowLimit = ctxPtr->dictLimit;
	ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base);
	ctxPtr->dictBase = ctxPtr->base;
	ctxPtr->base = newBlock - ctxPtr->dictLimit;
	ctxPtr->end = newBlock;

	/* match referencing will resume from there */
	ctxPtr->nextToUpdate = ctxPtr->dictLimit;
}
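
/*
 * LZ4_compressHC_continue_generic() - glue for streaming compression:
 * lazily initialises the context, rebases it before the 2 GB index
 * overflow point, switches to an external dictionary when blocks are not
 * contiguous, and shrinks the dictionary if the new input overlaps it,
 * before handing off to LZ4HC_compress_generic().
 */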
static int LZ4_compressHC_continue_generic(
	LZ4_streamHC_t *LZ4_streamHCPtr,
	const char *source,
	char *dest,
	int inputSize,
	int maxOutputSize,
	limitedOutput_directive limit)
{
	LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse;

	/* auto-init if forgotten */
	if (ctxPtr->base == NULL)
		LZ4HC_init(ctxPtr, (const BYTE *) source);

	/* Check overflow */
	if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 * GB) {
		size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base)
			- ctxPtr->dictLimit;
		if (dictSize > 64 * KB)
			dictSize = 64 * KB;
		LZ4_loadDictHC(LZ4_streamHCPtr,
			(const char *)(ctxPtr->end) - dictSize, (int)dictSize);
	}

	/* Check if blocks follow each other */
	if ((const BYTE *)source != ctxPtr->end)
		LZ4HC_setExternalDict(ctxPtr, (const BYTE *)source);

	/* Check overlapping input/dictionary space */
	{
		const BYTE *sourceEnd = (const BYTE *) source + inputSize;
		const BYTE * const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
		const BYTE * const dictEnd = ctxPtr->dictBase + ctxPtr->dictLimit;

		if ((sourceEnd > dictBegin)
			&& ((const BYTE *)source < dictEnd)) {
			if (sourceEnd > dictEnd)
				sourceEnd = dictEnd;
			ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);

			if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4)
				ctxPtr->lowLimit = ctxPtr->dictLimit;
		}
	}

	return LZ4HC_compress_generic(ctxPtr, source, dest,
		inputSize, maxOutputSize, ctxPtr->compressionLevel, limit);
}
int LZ4_compress_HC_continue(
	LZ4_streamHC_t *LZ4_streamHCPtr,
	const char *source,
	char *dest,
	int inputSize,
	int maxOutputSize)
{
	if (maxOutputSize < LZ4_compressBound(inputSize))
		return LZ4_compressHC_continue_generic(LZ4_streamHCPtr,
			source, dest, inputSize, maxOutputSize, limitedOutput);
	else
		return LZ4_compressHC_continue_generic(LZ4_streamHCPtr,
			source, dest, inputSize, maxOutputSize, noLimit);
}
EXPORT_SYMBOL(LZ4_compress_HC_continue);
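
/*
 * LZ4_saveDictHC() - copy the last @dictSize bytes of history (at most
 * 64 KB) into @safeBuffer and rebase the stream onto that copy, so the
 * previous input buffer may be reused for the next block.
 */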
/* dictionary saving */
int LZ4_saveDictHC(
	LZ4_streamHC_t *LZ4_streamHCPtr,
	char *safeBuffer,
	int dictSize)
{
	LZ4HC_CCtx_internal *const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
	int const prefixSize = (int)(streamPtr->end
		- (streamPtr->base + streamPtr->dictLimit));

	if (dictSize > 64 * KB)
		dictSize = 64 * KB;
	if (dictSize < 4)
		dictSize = 0;
	if (dictSize > prefixSize)
		dictSize = prefixSize;

	memmove(safeBuffer, streamPtr->end - dictSize, dictSize);

	{
		U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);

		streamPtr->end = (const BYTE *)safeBuffer + dictSize;
		streamPtr->base = streamPtr->end - endIndex;
		streamPtr->dictLimit = endIndex - dictSize;
		streamPtr->lowLimit = endIndex - dictSize;

		if (streamPtr->nextToUpdate < streamPtr->dictLimit)
			streamPtr->nextToUpdate = streamPtr->dictLimit;
	}

	return dictSize;
}
EXPORT_SYMBOL(LZ4_saveDictHC);
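
/*
 * Streaming use ties the functions above together roughly as follows
 * (illustrative sketch; the block loop, buffer handling and helpers such
 * as next_block()/emit() are assumptions, not APIs of this file):
 *
 *	LZ4_streamHC_t *s = kvmalloc(sizeof(*s), GFP_KERNEL);
 *	char dict[64 * 1024];
 *
 *	LZ4_resetStreamHC(s, LZ4HC_DEFAULT_CLEVEL);
 *	while (next_block(&src, &srcSize)) {
 *		int csize = LZ4_compress_HC_continue(s, src, dst,
 *				srcSize, dstCapacity);
 *		if (!csize)
 *			break;		// output buffer too small
 *		emit(dst, csize);
 *		// keep up to 64 KB of history valid while src is reused
 *		LZ4_saveDictHC(s, dict, sizeof(dict));
 *	}
 *	kvfree(s);
 */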
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 HC compressor");