lzo1x_compress.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  LZO1X Compressor from LZO
 *
 *  Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com>
 *
 *  The full LZO package can be found at:
 *  http://www.oberhumer.com/opensource/lzo/
 *
 *  Changed for Linux kernel use by:
 *  Nitin Gupta <nitingupta910@gmail.com>
 *  Richard Purdie <rpurdie@openedhand.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/unaligned.h>
#include <linux/lzo.h>
#include "lzodefs.h"
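
/*
 * Compress one block.  'ti' is the number of pending literal bytes carried
 * over from the previous block; the return value is how many trailing input
 * bytes were left unencoded and must later be emitted as literals by the
 * caller.  'wrkmem' holds the match dictionary (a hash table of recently
 * seen 4-byte sequences), and '*state_offset' records where, relative to
 * the current output position, the 2-bit short-literal count of the last
 * emitted instruction lives.
 */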
static noinline size_t
lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
		    unsigned char *out, size_t *out_len,
		    size_t ti, void *wrkmem, signed char *state_offset,
		    const unsigned char bitstream_version)
{
	const unsigned char *ip;
	unsigned char *op;
	const unsigned char * const in_end = in + in_len;
	const unsigned char * const ip_end = in + in_len - 20;
	const unsigned char *ii;
	lzo_dict_t * const dict = (lzo_dict_t *) wrkmem;

	op = out;
	ip = in;
	ii = ip;
	ip += ti < 4 ? 4 - ti : 0;

	for (;;) {
		const unsigned char *m_pos = NULL;
		size_t t, m_len, m_off;
		u32 dv;
		u32 run_length = 0;
literal:
		ip += 1 + ((ip - ii) >> 5);
next:
		if (unlikely(ip >= ip_end))
			break;
		dv = get_unaligned_le32(ip);
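
		/*
		 * In the lzo-rle bitstream (bitstream_version != 0) a zero
		 * 32-bit word starts a scan for a run of zero bytes, which is
		 * emitted further down as a single run-length instruction
		 * instead of ordinary matches.
		 */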
		if (dv == 0 && bitstream_version) {
			const unsigned char *ir = ip + 4;
			const unsigned char *limit = ip_end
				< (ip + MAX_ZERO_RUN_LENGTH + 1)
				? ip_end : ip + MAX_ZERO_RUN_LENGTH + 1;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \
	defined(LZO_FAST_64BIT_MEMORY_ACCESS)
			u64 dv64;

			for (; (ir + 32) <= limit; ir += 32) {
				dv64 = get_unaligned((u64 *)ir);
				dv64 |= get_unaligned((u64 *)ir + 1);
				dv64 |= get_unaligned((u64 *)ir + 2);
				dv64 |= get_unaligned((u64 *)ir + 3);
				if (dv64)
					break;
			}
			for (; (ir + 8) <= limit; ir += 8) {
				dv64 = get_unaligned((u64 *)ir);
				if (dv64) {
# if defined(__LITTLE_ENDIAN)
					ir += __builtin_ctzll(dv64) >> 3;
# elif defined(__BIG_ENDIAN)
					ir += __builtin_clzll(dv64) >> 3;
# else
# error "missing endian definition"
# endif
					break;
				}
			}
#else
			while ((ir < (const unsigned char *)
					ALIGN((uintptr_t)ir, 4)) &&
					(ir < limit) && (*ir == 0))
				ir++;
			if (IS_ALIGNED((uintptr_t)ir, 4)) {
				for (; (ir + 4) <= limit; ir += 4) {
					dv = *((u32 *)ir);
					if (dv) {
# if defined(__LITTLE_ENDIAN)
						ir += __builtin_ctz(dv) >> 3;
# elif defined(__BIG_ENDIAN)
						ir += __builtin_clz(dv) >> 3;
# else
# error "missing endian definition"
# endif
						break;
					}
				}
			}
#endif
			while (likely(ir < limit) && unlikely(*ir == 0))
				ir++;
			run_length = ir - ip;
			if (run_length > MAX_ZERO_RUN_LENGTH)
				run_length = MAX_ZERO_RUN_LENGTH;
		} else {
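			/*
			 * Multiplicative hash of the four bytes at 'ip'.  The
			 * dictionary slot holds the most recent position that
			 * hashed to the same value; if its bytes do not match
			 * 'dv' this was a collision, so keep taking literals.
			 */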
			t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK;
			m_pos = in + dict[t];
			dict[t] = (lzo_dict_t) (ip - in);
			if (unlikely(dv != get_unaligned_le32(m_pos)))
				goto literal;
		}
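
		/*
		 * Flush the literals gathered between 'ii' and 'ip'.  Runs of
		 * 1-3 bytes are folded into the state bits of the previous
		 * instruction, runs of up to 18 bytes fit in a single length
		 * byte, and longer runs use the zero-prefixed extended length
		 * encoding.
		 */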
		ii -= ti;
		ti = 0;
		t = ip - ii;
		if (t != 0) {
			if (t <= 3) {
				op[*state_offset] |= t;
				COPY4(op, ii);
				op += t;
			} else if (t <= 16) {
				*op++ = (t - 3);
				COPY8(op, ii);
				COPY8(op + 8, ii + 8);
				op += t;
			} else {
				if (t <= 18) {
					*op++ = (t - 3);
				} else {
					size_t tt = t - 18;
					*op++ = 0;
					while (unlikely(tt > 255)) {
						tt -= 255;
						*op++ = 0;
					}
					*op++ = tt;
				}
				do {
					COPY8(op, ii);
					COPY8(op + 8, ii + 8);
					op += 16;
					ii += 16;
					t -= 16;
				} while (t >= 16);
				if (t > 0) do {
					*op++ = *ii++;
				} while (--t > 0);
			}
		}
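
		/*
		 * A zero run was found above: emit the four-byte lzo-rle
		 * run-length instruction and skip match encoding for this
		 * iteration.
		 */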
		if (unlikely(run_length)) {
			ip += run_length;
			run_length -= MIN_ZERO_RUN_LENGTH;
			put_unaligned_le32((run_length << 21) | 0xfffc18
					   | (run_length & 0x7), op);
			op += 4;
			run_length = 0;
			*state_offset = -3;
			goto finished_writing_instruction;
		}
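
		/*
		 * Grow the match beyond its first four bytes, comparing 64-bit
		 * or 32-bit words at a time where the platform supports
		 * efficient unaligned access, and byte by byte otherwise.
		 */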
		m_len = 4;
		{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ64)
		u64 v;
		v = get_unaligned((const u64 *) (ip + m_len)) ^
		    get_unaligned((const u64 *) (m_pos + m_len));
		if (unlikely(v == 0)) {
			do {
				m_len += 8;
				v = get_unaligned((const u64 *) (ip + m_len)) ^
				    get_unaligned((const u64 *) (m_pos + m_len));
				if (unlikely(ip + m_len >= ip_end))
					goto m_len_done;
			} while (v == 0);
		}
# if defined(__LITTLE_ENDIAN)
		m_len += (unsigned) __builtin_ctzll(v) / 8;
# elif defined(__BIG_ENDIAN)
		m_len += (unsigned) __builtin_clzll(v) / 8;
# else
# error "missing endian definition"
# endif
#elif defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ32)
		u32 v;
		v = get_unaligned((const u32 *) (ip + m_len)) ^
		    get_unaligned((const u32 *) (m_pos + m_len));
		if (unlikely(v == 0)) {
			do {
				m_len += 4;
				v = get_unaligned((const u32 *) (ip + m_len)) ^
				    get_unaligned((const u32 *) (m_pos + m_len));
				if (v != 0)
					break;
				m_len += 4;
				v = get_unaligned((const u32 *) (ip + m_len)) ^
				    get_unaligned((const u32 *) (m_pos + m_len));
				if (unlikely(ip + m_len >= ip_end))
					goto m_len_done;
			} while (v == 0);
		}
# if defined(__LITTLE_ENDIAN)
		m_len += (unsigned) __builtin_ctz(v) / 8;
# elif defined(__BIG_ENDIAN)
		m_len += (unsigned) __builtin_clz(v) / 8;
# else
# error "missing endian definition"
# endif
#else
		if (unlikely(ip[m_len] == m_pos[m_len])) {
			do {
				m_len += 1;
				if (ip[m_len] != m_pos[m_len])
					break;
				m_len += 1;
				if (ip[m_len] != m_pos[m_len])
					break;
				m_len += 1;
				if (ip[m_len] != m_pos[m_len])
					break;
				m_len += 1;
				if (ip[m_len] != m_pos[m_len])
					break;
				m_len += 1;
				if (ip[m_len] != m_pos[m_len])
					break;
				m_len += 1;
				if (ip[m_len] != m_pos[m_len])
					break;
				m_len += 1;
				if (ip[m_len] != m_pos[m_len])
					break;
				m_len += 1;
				if (unlikely(ip + m_len >= ip_end))
					goto m_len_done;
			} while (ip[m_len] == m_pos[m_len]);
		}
#endif
		}
m_len_done:
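
		/*
		 * Encode the match: short matches at small offsets use the
		 * compact two-byte M2 form, medium offsets the M3 form and
		 * large offsets the M4 form, with extra length bytes once the
		 * length no longer fits in the marker byte.
		 */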
		m_off = ip - m_pos;
		ip += m_len;
		if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) {
			m_off -= 1;
			*op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2));
			*op++ = (m_off >> 3);
		} else if (m_off <= M3_MAX_OFFSET) {
			m_off -= 1;
			if (m_len <= M3_MAX_LEN)
				*op++ = (M3_MARKER | (m_len - 2));
			else {
				m_len -= M3_MAX_LEN;
				*op++ = M3_MARKER | 0;
				while (unlikely(m_len > 255)) {
					m_len -= 255;
					*op++ = 0;
				}
				*op++ = (m_len);
			}
			*op++ = (m_off << 2);
			*op++ = (m_off >> 6);
		} else {
			m_off -= 0x4000;
			if (m_len <= M4_MAX_LEN)
				*op++ = (M4_MARKER | ((m_off >> 11) & 8)
						| (m_len - 2));
			else {
				if (unlikely(((m_off & 0x403f) == 0x403f)
						&& (m_len >= 261)
						&& (m_len <= 264))
						&& likely(bitstream_version)) {
					// Under lzo-rle, block copies
					// for 261 <= length <= 264 and
					// (distance & 0x80f3) == 0x80f3
					// can result in ambiguous
					// output. Adjust length
					// to 260 to prevent ambiguity.
					ip -= m_len - 260;
					m_len = 260;
				}
				m_len -= M4_MAX_LEN;
				*op++ = (M4_MARKER | ((m_off >> 11) & 8));
				while (unlikely(m_len > 255)) {
					m_len -= 255;
					*op++ = 0;
				}
				*op++ = (m_len);
			}
			*op++ = (m_off << 2);
			*op++ = (m_off >> 6);
		}
		*state_offset = -2;
finished_writing_instruction:
		ii = ip;
		goto next;
	}
	*out_len = op - out;
	return in_end - (ii - ti);
}
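
/*
 * Driver for the block compressor above.  The input is processed in chunks
 * of at most m4_max_offset + 1 bytes so every match offset stays encodable,
 * the dictionary is cleared before each chunk, and whatever literals remain
 * at the end are flushed before the three-byte end-of-stream marker.  The
 * loop also bails out early if the pointer arithmetic would wrap around the
 * address space.
 */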
int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
			    unsigned char *out, size_t *out_len,
			    void *wrkmem, const unsigned char bitstream_version)
{
	const unsigned char *ip = in;
	unsigned char *op = out;
	unsigned char *data_start;
	size_t l = in_len;
	size_t t = 0;
	signed char state_offset = -2;
	unsigned int m4_max_offset;

	// LZO v0 will never write 17 as first byte (except for zero-length
	// input), so this is used to version the bitstream
	if (bitstream_version > 0) {
		*op++ = 17;
		*op++ = bitstream_version;
		m4_max_offset = M4_MAX_OFFSET_V1;
	} else {
		m4_max_offset = M4_MAX_OFFSET_V0;
	}

	data_start = op;

	while (l > 20) {
		size_t ll = l <= (m4_max_offset + 1) ? l : (m4_max_offset + 1);
		uintptr_t ll_end = (uintptr_t) ip + ll;
		if ((ll_end + ((t + ll) >> 5)) <= ll_end)
			break;
		BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS);
		memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t));
		t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem,
					&state_offset, bitstream_version);
		ip += ll;
		op += *out_len;
		l -= ll;
	}
	t += l;
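
	/*
	 * Flush whatever is left as one final literal run.  A run that sits
	 * at the very start of the stream and is at most 238 bytes long gets
	 * the special "17 + length" first byte; otherwise the normal
	 * literal-run encoding is used.
	 */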
	if (t > 0) {
		const unsigned char *ii = in + in_len - t;

		if (op == data_start && t <= 238) {
			*op++ = (17 + t);
		} else if (t <= 3) {
			op[state_offset] |= t;
		} else if (t <= 18) {
			*op++ = (t - 3);
		} else {
			size_t tt = t - 18;

			*op++ = 0;
			while (tt > 255) {
				tt -= 255;
				*op++ = 0;
			}

			*op++ = tt;
		}
		if (t >= 16) do {
			COPY8(op, ii);
			COPY8(op + 8, ii + 8);
			op += 16;
			ii += 16;
			t -= 16;
		} while (t >= 16);
		if (t > 0) do {
			*op++ = *ii++;
		} while (--t > 0);
	}

	*op++ = M4_MARKER | 1;
	*op++ = 0;
	*op++ = 0;

	*out_len = op - out;
	return LZO_E_OK;
}
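
/*
 * Exported entry points.  Callers are expected to pass a scratch buffer of
 * at least LZO1X_1_MEM_COMPRESS bytes as 'wrkmem' and an output buffer
 * sized for the worst case (lzo1x_worst_compress() in <linux/lzo.h>).
 * lzo1x_1_compress() produces the classic LZO1X-1 bitstream;
 * lzorle1x_1_compress() produces the run-length-extended variant.
 */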
int lzo1x_1_compress(const unsigned char *in, size_t in_len,
		     unsigned char *out, size_t *out_len,
		     void *wrkmem)
{
	return lzogeneric1x_1_compress(in, in_len, out, out_len, wrkmem, 0);
}

int lzorle1x_1_compress(const unsigned char *in, size_t in_len,
			unsigned char *out, size_t *out_len,
			void *wrkmem)
{
	return lzogeneric1x_1_compress(in, in_len, out, out_len,
				       wrkmem, LZO_VERSION);
}

EXPORT_SYMBOL_GPL(lzo1x_1_compress);
EXPORT_SYMBOL_GPL(lzorle1x_1_compress);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LZO1X-1 Compressor");