/*
 * Compute the CRC32 using a parallelized folding approach with the PCLMULQDQ
 * instruction.
 *
 * A white paper describing this algorithm can be found at:
 * http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
 *
 * Copyright (C) 2013 Intel Corporation. All rights reserved.
 * Authors:
 *  Wajdi Feghali   <wajdi.k.feghali@intel.com>
 *  Jim Guilford    <james.guilford@intel.com>
 *  Vinodh Gopal    <vinodh.gopal@intel.com>
 *  Erdinc Ozturk   <erdinc.ozturk@intel.com>
 *  Jim Kukunas     <james.t.kukunas@linux.intel.com>
 *
 * For conditions of distribution and use, see copyright notice in zlib.h
 */
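
/*
 * Rough usage sketch.  This is only an illustration of how the three entry
 * points below are expected to be driven (the real call sites live in the
 * SIMD-enabled deflate code, not in this file):
 *
 *     crc_fold_init(s);                  seed the folded state in s->crc0[]
 *     crc_fold_copy(s, dst, src, len);   copy + fold, called repeatedly
 *     crc = crc_fold_512to32(s);         final 512-bit -> 32-bit reduction
 *
 * The running CRC is kept as four 128-bit lanes (plus one scratch lane for
 * partial blocks) in s->crc0[].
 */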
#include "deflate.h"

#ifdef CRC32_SIMD_SSE42_PCLMUL

#include <inttypes.h>
#include <emmintrin.h>
#include <immintrin.h>
#include <wmmintrin.h>
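
/*
 * CRC_LOAD opens a scope and pulls the five folded-CRC vectors out of
 * s->crc0[] into local __m128i variables; CRC_SAVE writes them back and
 * closes that scope.  The two macros must be used as a matched pair within
 * a single function.
 */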
#define CRC_LOAD(s) \
    do { \
        __m128i xmm_crc0 = _mm_loadu_si128((__m128i *)s->crc0 + 0);\
        __m128i xmm_crc1 = _mm_loadu_si128((__m128i *)s->crc0 + 1);\
        __m128i xmm_crc2 = _mm_loadu_si128((__m128i *)s->crc0 + 2);\
        __m128i xmm_crc3 = _mm_loadu_si128((__m128i *)s->crc0 + 3);\
        __m128i xmm_crc_part = _mm_loadu_si128((__m128i *)s->crc0 + 4);

#define CRC_SAVE(s) \
        _mm_storeu_si128((__m128i *)s->crc0 + 0, xmm_crc0);\
        _mm_storeu_si128((__m128i *)s->crc0 + 1, xmm_crc1);\
        _mm_storeu_si128((__m128i *)s->crc0 + 2, xmm_crc2);\
        _mm_storeu_si128((__m128i *)s->crc0 + 3, xmm_crc3);\
        _mm_storeu_si128((__m128i *)s->crc0 + 4, xmm_crc_part);\
    } while (0);
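
/*
 * crc_fold_init: reset the folded CRC state.  xmm_crc0 is seeded with a
 * precomputed constant that plays the role of the conventional all-ones
 * CRC-32 initial value in the folded representation; the other lanes start
 * out as zero.
 */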
ZLIB_INTERNAL void crc_fold_init(deflate_state *const s)
{
    CRC_LOAD(s)

    xmm_crc0 = _mm_cvtsi32_si128(0x9db42487);
    xmm_crc1 = _mm_setzero_si128();
    xmm_crc2 = _mm_setzero_si128();
    xmm_crc3 = _mm_setzero_si128();

    CRC_SAVE(s)

    s->strm->adler = 0;
}
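
/*
 * fold_1 .. fold_4 advance the 512-bit folded state by 1..4 16-byte blocks.
 * Each folded lane is carry-less multiplied by both halves of the
 * fold-by-four constant pair (from the Intel white paper) and XORed into the
 * lane four block positions ahead in the stream; the caller then XORs the
 * freshly read data blocks into the vacated lanes.
 */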
local void fold_1(deflate_state *const s,
        __m128i *xmm_crc0, __m128i *xmm_crc1,
        __m128i *xmm_crc2, __m128i *xmm_crc3)
{
    const __m128i xmm_fold4 = _mm_set_epi32(
            0x00000001, 0x54442bd4,
            0x00000001, 0xc6e41596);

    __m128i x_tmp3;
    __m128 ps_crc0, ps_crc3, ps_res;

    x_tmp3 = *xmm_crc3;

    *xmm_crc3 = *xmm_crc0;
    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
    ps_res = _mm_xor_ps(ps_crc0, ps_crc3);

    *xmm_crc0 = *xmm_crc1;
    *xmm_crc1 = *xmm_crc2;
    *xmm_crc2 = x_tmp3;
    *xmm_crc3 = _mm_castps_si128(ps_res);
}
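
/* fold_2: as fold_1, but advances the folded state by two 16-byte blocks. */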
local void fold_2(deflate_state *const s,
        __m128i *xmm_crc0, __m128i *xmm_crc1,
        __m128i *xmm_crc2, __m128i *xmm_crc3)
{
    const __m128i xmm_fold4 = _mm_set_epi32(
            0x00000001, 0x54442bd4,
            0x00000001, 0xc6e41596);

    __m128i x_tmp3, x_tmp2;
    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3, ps_res31, ps_res20;

    x_tmp3 = *xmm_crc3;
    x_tmp2 = *xmm_crc2;

    *xmm_crc3 = *xmm_crc1;
    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
    ps_res31 = _mm_xor_ps(ps_crc3, ps_crc1);

    *xmm_crc2 = *xmm_crc0;
    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);
    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
    ps_res20 = _mm_xor_ps(ps_crc0, ps_crc2);

    *xmm_crc0 = x_tmp2;
    *xmm_crc1 = x_tmp3;
    *xmm_crc2 = _mm_castps_si128(ps_res20);
    *xmm_crc3 = _mm_castps_si128(ps_res31);
}
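
/* fold_3: as fold_1, but advances the folded state by three 16-byte blocks. */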
local void fold_3(deflate_state *const s,
        __m128i *xmm_crc0, __m128i *xmm_crc1,
        __m128i *xmm_crc2, __m128i *xmm_crc3)
{
    const __m128i xmm_fold4 = _mm_set_epi32(
            0x00000001, 0x54442bd4,
            0x00000001, 0xc6e41596);

    __m128i x_tmp3;
    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3, ps_res32, ps_res21, ps_res10;

    x_tmp3 = *xmm_crc3;

    *xmm_crc3 = *xmm_crc2;
    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x01);
    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
    ps_res32 = _mm_xor_ps(ps_crc2, ps_crc3);

    *xmm_crc2 = *xmm_crc1;
    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);
    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
    ps_res21 = _mm_xor_ps(ps_crc1, ps_crc2);

    *xmm_crc1 = *xmm_crc0;
    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x10);
    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
    ps_res10 = _mm_xor_ps(ps_crc0, ps_crc1);

    *xmm_crc0 = x_tmp3;
    *xmm_crc1 = _mm_castps_si128(ps_res10);
    *xmm_crc2 = _mm_castps_si128(ps_res21);
    *xmm_crc3 = _mm_castps_si128(ps_res32);
}
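
/*
 * fold_4: fold all four lanes forward by 64 bytes; the caller XORs four
 * freshly loaded data blocks into the result.
 */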
local void fold_4(deflate_state *const s,
        __m128i *xmm_crc0, __m128i *xmm_crc1,
        __m128i *xmm_crc2, __m128i *xmm_crc3)
{
    const __m128i xmm_fold4 = _mm_set_epi32(
            0x00000001, 0x54442bd4,
            0x00000001, 0xc6e41596);

    __m128i x_tmp0, x_tmp1, x_tmp2, x_tmp3;
    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3;
    __m128 ps_t0, ps_t1, ps_t2, ps_t3;
    __m128 ps_res0, ps_res1, ps_res2, ps_res3;

    x_tmp0 = *xmm_crc0;
    x_tmp1 = *xmm_crc1;
    x_tmp2 = *xmm_crc2;
    x_tmp3 = *xmm_crc3;

    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
    x_tmp0 = _mm_clmulepi64_si128(x_tmp0, xmm_fold4, 0x10);
    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
    ps_t0 = _mm_castsi128_ps(x_tmp0);
    ps_res0 = _mm_xor_ps(ps_crc0, ps_t0);

    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
    x_tmp1 = _mm_clmulepi64_si128(x_tmp1, xmm_fold4, 0x10);
    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
    ps_t1 = _mm_castsi128_ps(x_tmp1);
    ps_res1 = _mm_xor_ps(ps_crc1, ps_t1);

    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x01);
    x_tmp2 = _mm_clmulepi64_si128(x_tmp2, xmm_fold4, 0x10);
    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
    ps_t2 = _mm_castsi128_ps(x_tmp2);
    ps_res2 = _mm_xor_ps(ps_crc2, ps_t2);

    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x01);
    x_tmp3 = _mm_clmulepi64_si128(x_tmp3, xmm_fold4, 0x10);
    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
    ps_t3 = _mm_castsi128_ps(x_tmp3);
    ps_res3 = _mm_xor_ps(ps_crc3, ps_t3);

    *xmm_crc0 = _mm_castps_si128(ps_res0);
    *xmm_crc1 = _mm_castps_si128(ps_res1);
    *xmm_crc2 = _mm_castps_si128(ps_res2);
    *xmm_crc3 = _mm_castps_si128(ps_res3);
}
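
/*
 * PSHUFB byte-permutation masks used by partial_fold().  Row (len - 1)
 * shifts a 128-bit register left by (16 - len) bytes; XORing a row with
 * 0x80808080 yields the matching right-shift mask (PSHUFB zeroes any byte
 * whose selector has the high bit set).  zalign() is the alignment-attribute
 * macro, assumed to be provided by deflate.h.
 */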
local const unsigned zalign(32) pshufb_shf_table[60] = {
    0x84838281,0x88878685,0x8c8b8a89,0x008f8e8d, /* shl 15 (16 - 1)/shr1 */
    0x85848382,0x89888786,0x8d8c8b8a,0x01008f8e, /* shl 14 (16 - 2)/shr2 */
    0x86858483,0x8a898887,0x8e8d8c8b,0x0201008f, /* shl 13 (16 - 3)/shr3 */
    0x87868584,0x8b8a8988,0x8f8e8d8c,0x03020100, /* shl 12 (16 - 4)/shr4 */
    0x88878685,0x8c8b8a89,0x008f8e8d,0x04030201, /* shl 11 (16 - 5)/shr5 */
    0x89888786,0x8d8c8b8a,0x01008f8e,0x05040302, /* shl 10 (16 - 6)/shr6 */
    0x8a898887,0x8e8d8c8b,0x0201008f,0x06050403, /* shl  9 (16 - 7)/shr7 */
    0x8b8a8988,0x8f8e8d8c,0x03020100,0x07060504, /* shl  8 (16 - 8)/shr8 */
    0x8c8b8a89,0x008f8e8d,0x04030201,0x08070605, /* shl  7 (16 - 9)/shr9 */
    0x8d8c8b8a,0x01008f8e,0x05040302,0x09080706, /* shl  6 (16 -10)/shr10*/
    0x8e8d8c8b,0x0201008f,0x06050403,0x0a090807, /* shl  5 (16 -11)/shr11*/
    0x8f8e8d8c,0x03020100,0x07060504,0x0b0a0908, /* shl  4 (16 -12)/shr12*/
    0x008f8e8d,0x04030201,0x08070605,0x0c0b0a09, /* shl  3 (16 -13)/shr13*/
    0x01008f8e,0x05040302,0x09080706,0x0d0c0b0a, /* shl  2 (16 -14)/shr14*/
    0x0201008f,0x06050403,0x0a090807,0x0e0d0c0b  /* shl  1 (16 -15)/shr15*/
};
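
/*
 * partial_fold: absorb the len (1..15) new bytes held in *xmm_crc_part.
 * The 64-byte window slides forward by len bytes: each lane is shifted and
 * topped up from the next one, the new bytes enter at the top of xmm_crc3,
 * and the len bytes that drop out of xmm_crc0 are folded back into xmm_crc3
 * with the fold-by-four constants.
 */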
local void partial_fold(deflate_state *const s, const size_t len,
        __m128i *xmm_crc0, __m128i *xmm_crc1,
        __m128i *xmm_crc2, __m128i *xmm_crc3,
        __m128i *xmm_crc_part)
{
    const __m128i xmm_fold4 = _mm_set_epi32(
            0x00000001, 0x54442bd4,
            0x00000001, 0xc6e41596);
    const __m128i xmm_mask3 = _mm_set1_epi32(0x80808080);

    __m128i xmm_shl, xmm_shr, xmm_tmp1, xmm_tmp2, xmm_tmp3;
    __m128i xmm_a0_0, xmm_a0_1;
    __m128 ps_crc3, psa0_0, psa0_1, ps_res;

    xmm_shl = _mm_load_si128((__m128i *)pshufb_shf_table + (len - 1));
    xmm_shr = xmm_shl;
    xmm_shr = _mm_xor_si128(xmm_shr, xmm_mask3);

    xmm_a0_0 = _mm_shuffle_epi8(*xmm_crc0, xmm_shl);

    *xmm_crc0 = _mm_shuffle_epi8(*xmm_crc0, xmm_shr);
    xmm_tmp1 = _mm_shuffle_epi8(*xmm_crc1, xmm_shl);
    *xmm_crc0 = _mm_or_si128(*xmm_crc0, xmm_tmp1);

    *xmm_crc1 = _mm_shuffle_epi8(*xmm_crc1, xmm_shr);
    xmm_tmp2 = _mm_shuffle_epi8(*xmm_crc2, xmm_shl);
    *xmm_crc1 = _mm_or_si128(*xmm_crc1, xmm_tmp2);

    *xmm_crc2 = _mm_shuffle_epi8(*xmm_crc2, xmm_shr);
    xmm_tmp3 = _mm_shuffle_epi8(*xmm_crc3, xmm_shl);
    *xmm_crc2 = _mm_or_si128(*xmm_crc2, xmm_tmp3);

    *xmm_crc3 = _mm_shuffle_epi8(*xmm_crc3, xmm_shr);
    *xmm_crc_part = _mm_shuffle_epi8(*xmm_crc_part, xmm_shl);
    *xmm_crc3 = _mm_or_si128(*xmm_crc3, *xmm_crc_part);

    xmm_a0_1 = _mm_clmulepi64_si128(xmm_a0_0, xmm_fold4, 0x10);
    xmm_a0_0 = _mm_clmulepi64_si128(xmm_a0_0, xmm_fold4, 0x01);

    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
    psa0_0 = _mm_castsi128_ps(xmm_a0_0);
    psa0_1 = _mm_castsi128_ps(xmm_a0_1);

    ps_res = _mm_xor_ps(ps_crc3, psa0_0);
    ps_res = _mm_xor_ps(ps_res, psa0_1);

    *xmm_crc3 = _mm_castps_si128(ps_res);
}
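
/*
 * crc_fold_copy: copy len bytes from src to dst while folding them into the
 * running CRC state kept in s->crc0[].  Data is written to dst in 16-byte
 * vector chunks, so dst is assumed to have enough slack past len bytes for
 * the final partial store (the deflate window is expected to provide this).
 */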
ZLIB_INTERNAL void crc_fold_copy(deflate_state *const s,
        unsigned char *dst, const unsigned char *src, long len)
{
    unsigned long algn_diff;
    __m128i xmm_t0, xmm_t1, xmm_t2, xmm_t3;

    CRC_LOAD(s)

    if (len < 16) {
        if (len == 0)
            return;
        goto partial;
    }

    /* Copy/fold an unaligned prefix so the main loop can use aligned loads. */
    algn_diff = (0 - (uintptr_t)src) & 0xF;
    if (algn_diff) {
        xmm_crc_part = _mm_loadu_si128((__m128i *)src);
        _mm_storeu_si128((__m128i *)dst, xmm_crc_part);

        dst += algn_diff;
        src += algn_diff;
        len -= algn_diff;

        partial_fold(s, algn_diff, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3,
            &xmm_crc_part);
    }
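
    /*
     * Main loop: fold the state forward by 64 bytes per iteration, copy the
     * four aligned source blocks to dst, and XOR them into the folded lanes.
     */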
    while ((len -= 64) >= 0) {
        xmm_t0 = _mm_load_si128((__m128i *)src);
        xmm_t1 = _mm_load_si128((__m128i *)src + 1);
        xmm_t2 = _mm_load_si128((__m128i *)src + 2);
        xmm_t3 = _mm_load_si128((__m128i *)src + 3);

        fold_4(s, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);

        _mm_storeu_si128((__m128i *)dst, xmm_t0);
        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
        _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
        _mm_storeu_si128((__m128i *)dst + 3, xmm_t3);

        xmm_crc0 = _mm_xor_si128(xmm_crc0, xmm_t0);
        xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_t1);
        xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t2);
        xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t3);

        src += 64;
        dst += 64;
    }

    /*
     * At this point len == (bytes remaining) - 64, i.e. -64 <= len <= -1.
     */
    if (len + 16 >= 0) {
        /* 48..63 bytes remain. */
        len += 16;

        xmm_t0 = _mm_load_si128((__m128i *)src);
        xmm_t1 = _mm_load_si128((__m128i *)src + 1);
        xmm_t2 = _mm_load_si128((__m128i *)src + 2);

        fold_3(s, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);

        _mm_storeu_si128((__m128i *)dst, xmm_t0);
        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
        _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);

        xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_t0);
        xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t1);
        xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t2);

        if (len == 0)
            goto done;

        dst += 48;
        src += 48;
    } else if (len + 32 >= 0) {
        /* 32..47 bytes remain. */
        len += 32;

        xmm_t0 = _mm_load_si128((__m128i *)src);
        xmm_t1 = _mm_load_si128((__m128i *)src + 1);

        fold_2(s, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);

        _mm_storeu_si128((__m128i *)dst, xmm_t0);
        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);

        xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t0);
        xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t1);

        if (len == 0)
            goto done;

        dst += 32;
        src += 32;
    } else if (len + 48 >= 0) {
        /* 16..31 bytes remain. */
        len += 48;

        xmm_t0 = _mm_load_si128((__m128i *)src);

        fold_1(s, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);

        _mm_storeu_si128((__m128i *)dst, xmm_t0);

        xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t0);

        if (len == 0)
            goto done;

        dst += 16;
        src += 16;
    } else {
        /* 0..15 bytes remain. */
        len += 64;
        if (len == 0)
            goto done;
    }
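
    /*
     * Fold the final 1..15 bytes: zero-pad them into xmm_crc_part (the
     * memcpy avoids reading past the end of src) and merge them with
     * partial_fold().
     */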
partial:

#if defined(_MSC_VER)
    /* VS does not permit the use of _mm_set_epi64x in 32-bit builds */
    {
        int32_t parts[4] = {0, 0, 0, 0};
        memcpy(&parts, src, len);
        xmm_crc_part = _mm_set_epi32(parts[3], parts[2], parts[1], parts[0]);
    }
#else
    {
        int64_t parts[2] = {0, 0};
        memcpy(&parts, src, len);
        xmm_crc_part = _mm_set_epi64x(parts[1], parts[0]);
    }
#endif

    _mm_storeu_si128((__m128i *)dst, xmm_crc_part);
    partial_fold(s, len, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3,
        &xmm_crc_part);
done:
    CRC_SAVE(s)
}
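
/*
 * Constants for the final 512-bit to 32-bit reduction (rk1..rk8 naming as in
 * the Intel white paper): rk1/rk2 fold the four lanes down to one, rk5/rk6
 * reduce 128 bits to 64, and rk7/rk8 drive the Barrett reduction to 32 bits.
 */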
local const unsigned zalign(16) crc_k[] = {
    0xccaa009e, 0x00000000, /* rk1 */
    0x751997d0, 0x00000001, /* rk2 */
    0xccaa009e, 0x00000000, /* rk5 */
    0x63cd6124, 0x00000001, /* rk6 */
    0xf7011640, 0x00000001, /* rk7 */
    0xdb710640, 0x00000001  /* rk8 */
};

local const unsigned zalign(16) crc_mask[4] = {
    0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000
};

local const unsigned zalign(16) crc_mask2[4] = {
    0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF
};
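
/*
 * crc_fold_512to32: reduce the 512-bit folded state in s->crc0[] to the
 * final 32-bit CRC (fold to 128 bits, then to 64, then Barrett-reduce) and
 * return its bitwise complement.
 */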
unsigned ZLIB_INTERNAL crc_fold_512to32(deflate_state *const s)
{
    const __m128i xmm_mask  = _mm_load_si128((__m128i *)crc_mask);
    const __m128i xmm_mask2 = _mm_load_si128((__m128i *)crc_mask2);

    unsigned crc;
    __m128i x_tmp0, x_tmp1, x_tmp2, crc_fold;

    CRC_LOAD(s)

    /*
     * k1
     */
    crc_fold = _mm_load_si128((__m128i *)crc_k);

    x_tmp0 = _mm_clmulepi64_si128(xmm_crc0, crc_fold, 0x10);
    xmm_crc0 = _mm_clmulepi64_si128(xmm_crc0, crc_fold, 0x01);
    xmm_crc1 = _mm_xor_si128(xmm_crc1, x_tmp0);
    xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_crc0);

    x_tmp1 = _mm_clmulepi64_si128(xmm_crc1, crc_fold, 0x10);
    xmm_crc1 = _mm_clmulepi64_si128(xmm_crc1, crc_fold, 0x01);
    xmm_crc2 = _mm_xor_si128(xmm_crc2, x_tmp1);
    xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_crc1);

    x_tmp2 = _mm_clmulepi64_si128(xmm_crc2, crc_fold, 0x10);
    xmm_crc2 = _mm_clmulepi64_si128(xmm_crc2, crc_fold, 0x01);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, x_tmp2);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);

    /*
     * k5
     */
    crc_fold = _mm_load_si128((__m128i *)crc_k + 1);

    xmm_crc0 = xmm_crc3;
    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0);
    xmm_crc0 = _mm_srli_si128(xmm_crc0, 8);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc0);

    xmm_crc0 = xmm_crc3;
    xmm_crc3 = _mm_slli_si128(xmm_crc3, 4);
    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0x10);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc0);
    xmm_crc3 = _mm_and_si128(xmm_crc3, xmm_mask2);

    /*
     * k7
     */
    xmm_crc1 = xmm_crc3;
    xmm_crc2 = xmm_crc3;
    crc_fold = _mm_load_si128((__m128i *)crc_k + 2);

    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);
    xmm_crc3 = _mm_and_si128(xmm_crc3, xmm_mask);

    xmm_crc2 = xmm_crc3;
    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0x10);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc1);

    crc = _mm_extract_epi32(xmm_crc3, 2);
    return ~crc;
    /*
     * Unreachable, but CRC_SAVE supplies the "} while (0);" that closes the
     * block opened by CRC_LOAD above.
     */
    CRC_SAVE(s)
}

#endif /* CRC32_SIMD_SSE42_PCLMUL */