// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2002 H. Peter Anvin - All Rights Reserved
 *
 * ----------------------------------------------------------------------- */

/*
 * raid6/sse2.c
 *
 * SSE-2 implementation of RAID-6 syndrome functions
 *
 */

#include <linux/raid/pq.h>
#include "x86.h"
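
/*
 * 0x1d is the low byte of the RAID-6 field polynomial
 * (x^8 + x^4 + x^3 + x^2 + 1); it is XORed into a byte whenever a
 * multiply-by-2 in GF(2^8) shifts the top bit out.
 */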
static const struct raid6_sse_constants {
	u64 x1d[2];
} raid6_sse_constants __attribute__((aligned(16))) = {
	{ 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL },
};

static int raid6_have_sse2(void)
{
	/* Not really boot_cpu but "all_cpus" */
	return boot_cpu_has(X86_FEATURE_MMX) &&
		boot_cpu_has(X86_FEATURE_FXSR) &&
		boot_cpu_has(X86_FEATURE_XMM) &&
		boot_cpu_has(X86_FEATURE_XMM2);
}

/*
 * Plain SSE2 implementation
 */
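
/*
 * For each 16-byte chunk, P is the XOR of all data blocks and Q is built
 * by Horner's rule: starting from the highest data disk, Q = Q*2 ^ D[z]
 * for each lower disk z, with the multiply done in GF(2^8).  The
 * multiply-by-2 is the four-instruction sequence pcmpgtb (build a 0xff
 * byte mask from each byte's top bit), paddb (double each byte),
 * pand (reduce the mask to 0x1d) and pxor (fold the reduction back in).
 */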
static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
	asm volatile("pxor %xmm5,%xmm5");	/* Zero temp */

	for ( d = 0 ; d < bytes ; d += 16 ) {
		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
		asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */
		asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
		asm volatile("movdqa %xmm2,%xmm4");	/* Q[0] */
		asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z0-1][d]));
		for ( z = z0-2 ; z >= 0 ; z-- ) {
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm6,%xmm2");
			asm volatile("pxor %xmm6,%xmm4");
			asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z][d]));
		}
		asm volatile("pcmpgtb %xmm4,%xmm5");
		asm volatile("paddb %xmm4,%xmm4");
		asm volatile("pand %xmm0,%xmm5");
		asm volatile("pxor %xmm5,%xmm4");
		asm volatile("pxor %xmm5,%xmm5");
		asm volatile("pxor %xmm6,%xmm2");
		asm volatile("pxor %xmm6,%xmm4");

		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
		asm volatile("pxor %xmm2,%xmm2");
		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
		asm volatile("pxor %xmm4,%xmm4");
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}
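
/*
 * xor_syndrome folds the contribution of data disks start..stop into an
 * existing P and Q: P is XORed with those blocks directly, while the Q
 * accumulator is built by the same Horner recurrence and then multiplied
 * by 2 once for every skipped disk below 'start' (the "left side" loop),
 * so each block ends up scaled by the correct power of the generator.
 * Disks above 'stop' are never touched (the "right side" optimization).
 */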
static void raid6_sse21_xor_syndrome(int disks, int start, int stop,
				     size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = stop;		/* P/Q right side optimization */
	p = dptr[disks-2];	/* XOR parity */
	q = dptr[disks-1];	/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));

	for ( d = 0 ; d < bytes ; d += 16 ) {
		asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
		asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
		asm volatile("pxor %xmm4,%xmm2");
		/* P/Q data pages */
		for ( z = z0-1 ; z >= start ; z-- ) {
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
			asm volatile("pxor %xmm5,%xmm2");
			asm volatile("pxor %xmm5,%xmm4");
		}
		/* P/Q left side optimization */
		for ( z = start-1 ; z >= 0 ; z-- ) {
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pxor %xmm5,%xmm4");
		}
		asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
		/* Don't use movntdq for r/w memory area < cache line */
		asm volatile("movdqa %%xmm4,%0" : "=m" (q[d]));
		asm volatile("movdqa %%xmm2,%0" : "=m" (p[d]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}
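
/*
 * Descriptor for the one-vector-wide variant; the final field flags that
 * this implementation uses cache hints (non-temporal stores and prefetch).
 */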
const struct raid6_calls raid6_sse2x1 = {
	raid6_sse21_gen_syndrome,
	raid6_sse21_xor_syndrome,
	raid6_have_sse2,
	"sse2x1",
	1			/* Has cache hints */
};

/*
 * Unrolled-by-2 SSE2 implementation
 */
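
/*
 * Same algorithm as the x1 variant, but two 16-byte vectors are carried per
 * loop iteration (32 bytes per pass) so independent dependency chains can
 * overlap.
 */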
static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
	asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
	asm volatile("pxor %xmm7,%xmm7"); /* Zero temp */

	/* We uniformly assume a single prefetch covers at least 32 bytes */
	for ( d = 0 ; d < bytes ; d += 32 ) {
		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
		asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d]));    /* P[0] */
		asm volatile("movdqa %0,%%xmm3" : : "m" (dptr[z0][d+16])); /* P[1] */
		asm volatile("movdqa %xmm2,%xmm4");	/* Q[0] */
		asm volatile("movdqa %xmm3,%xmm6");	/* Q[1] */
		for ( z = z0-1 ; z >= 0 ; z-- ) {
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("movdqa %0,%%xmm5" : : "m" (dptr[z][d]));
			asm volatile("movdqa %0,%%xmm7" : : "m" (dptr[z][d+16]));
			asm volatile("pxor %xmm5,%xmm2");
			asm volatile("pxor %xmm7,%xmm3");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
		}
		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
		asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

static void raid6_sse22_xor_syndrome(int disks, int start, int stop,
				     size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = stop;		/* P/Q right side optimization */
	p = dptr[disks-2];	/* XOR parity */
	q = dptr[disks-1];	/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));

	for ( d = 0 ; d < bytes ; d += 32 ) {
		asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
		asm volatile("movdqa %0,%%xmm6" :: "m" (dptr[z0][d+16]));
		asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
		asm volatile("movdqa %0,%%xmm3" : : "m" (p[d+16]));
		asm volatile("pxor %xmm4,%xmm2");
		asm volatile("pxor %xmm6,%xmm3");
		/* P/Q data pages */
		for ( z = z0-1 ; z >= start ; z-- ) {
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
			asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
			asm volatile("pxor %xmm5,%xmm2");
			asm volatile("pxor %xmm7,%xmm3");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
		}
		/* P/Q left side optimization */
		for ( z = start-1 ; z >= 0 ; z-- ) {
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
		}
		asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
		asm volatile("pxor %0,%%xmm6" : : "m" (q[d+16]));
		/* Don't use movntdq for r/w memory area < cache line */
		asm volatile("movdqa %%xmm4,%0" : "=m" (q[d]));
		asm volatile("movdqa %%xmm6,%0" : "=m" (q[d+16]));
		asm volatile("movdqa %%xmm2,%0" : "=m" (p[d]));
		asm volatile("movdqa %%xmm3,%0" : "=m" (p[d+16]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

const struct raid6_calls raid6_sse2x2 = {
	raid6_sse22_gen_syndrome,
	raid6_sse22_xor_syndrome,
	raid6_have_sse2,
	"sse2x2",
	1			/* Has cache hints */
};

#ifdef CONFIG_X86_64

/*
 * Unrolled-by-4 SSE2 implementation
 */
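
/*
 * Built only for x86-64: this variant keeps four P and four Q accumulators
 * live at once and needs the high xmm registers (xmm10-xmm15), which are
 * not available in 32-bit mode.
 */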
static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0]));
	asm volatile("pxor %xmm2,%xmm2");	/* P[0] */
	asm volatile("pxor %xmm3,%xmm3");	/* P[1] */
	asm volatile("pxor %xmm4,%xmm4");	/* Q[0] */
	asm volatile("pxor %xmm5,%xmm5");	/* Zero temp */
	asm volatile("pxor %xmm6,%xmm6");	/* Q[1] */
	asm volatile("pxor %xmm7,%xmm7");	/* Zero temp */
	asm volatile("pxor %xmm10,%xmm10");	/* P[2] */
	asm volatile("pxor %xmm11,%xmm11");	/* P[3] */
	asm volatile("pxor %xmm12,%xmm12");	/* Q[2] */
	asm volatile("pxor %xmm13,%xmm13");	/* Zero temp */
	asm volatile("pxor %xmm14,%xmm14");	/* Q[3] */
	asm volatile("pxor %xmm15,%xmm15");	/* Zero temp */

	for ( d = 0 ; d < bytes ; d += 64 ) {
		for ( z = z0 ; z >= 0 ; z-- ) {
			/* The second prefetch seems to improve performance... */
			asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
			asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32]));
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("pcmpgtb %xmm12,%xmm13");
			asm volatile("pcmpgtb %xmm14,%xmm15");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("paddb %xmm12,%xmm12");
			asm volatile("paddb %xmm14,%xmm14");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pand %xmm0,%xmm13");
			asm volatile("pand %xmm0,%xmm15");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm13,%xmm12");
			asm volatile("pxor %xmm15,%xmm14");
			asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
			asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
			asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32]));
			asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48]));
			asm volatile("pxor %xmm5,%xmm2");
			asm volatile("pxor %xmm7,%xmm3");
			asm volatile("pxor %xmm13,%xmm10");
			asm volatile("pxor %xmm15,%xmm11");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm13,%xmm12");
			asm volatile("pxor %xmm15,%xmm14");
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
			asm volatile("pxor %xmm13,%xmm13");
			asm volatile("pxor %xmm15,%xmm15");
		}
		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
		asm volatile("pxor %xmm2,%xmm2");
		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
		asm volatile("pxor %xmm3,%xmm3");
		asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
		asm volatile("pxor %xmm10,%xmm10");
		asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
		asm volatile("pxor %xmm11,%xmm11");
		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
		asm volatile("pxor %xmm4,%xmm4");
		asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
		asm volatile("pxor %xmm6,%xmm6");
		asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32]));
		asm volatile("pxor %xmm12,%xmm12");
		asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
		asm volatile("pxor %xmm14,%xmm14");
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
				     size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = stop;		/* P/Q right side optimization */
	p = dptr[disks-2];	/* XOR parity */
	q = dptr[disks-1];	/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0]));

	for ( d = 0 ; d < bytes ; d += 64 ) {
		asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
		asm volatile("movdqa %0,%%xmm6" :: "m" (dptr[z0][d+16]));
		asm volatile("movdqa %0,%%xmm12" :: "m" (dptr[z0][d+32]));
		asm volatile("movdqa %0,%%xmm14" :: "m" (dptr[z0][d+48]));
		asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
		asm volatile("movdqa %0,%%xmm3" : : "m" (p[d+16]));
		asm volatile("movdqa %0,%%xmm10" : : "m" (p[d+32]));
		asm volatile("movdqa %0,%%xmm11" : : "m" (p[d+48]));
		asm volatile("pxor %xmm4,%xmm2");
		asm volatile("pxor %xmm6,%xmm3");
		asm volatile("pxor %xmm12,%xmm10");
		asm volatile("pxor %xmm14,%xmm11");
		/* P/Q data pages */
		for ( z = z0-1 ; z >= start ; z-- ) {
			asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
			asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32]));
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
			asm volatile("pxor %xmm13,%xmm13");
			asm volatile("pxor %xmm15,%xmm15");
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("pcmpgtb %xmm12,%xmm13");
			asm volatile("pcmpgtb %xmm14,%xmm15");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("paddb %xmm12,%xmm12");
			asm volatile("paddb %xmm14,%xmm14");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pand %xmm0,%xmm13");
			asm volatile("pand %xmm0,%xmm15");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm13,%xmm12");
			asm volatile("pxor %xmm15,%xmm14");
			asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
			asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
			asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32]));
			asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48]));
			asm volatile("pxor %xmm5,%xmm2");
			asm volatile("pxor %xmm7,%xmm3");
			asm volatile("pxor %xmm13,%xmm10");
			asm volatile("pxor %xmm15,%xmm11");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm13,%xmm12");
			asm volatile("pxor %xmm15,%xmm14");
		}
		asm volatile("prefetchnta %0" :: "m" (q[d]));
		asm volatile("prefetchnta %0" :: "m" (q[d+32]));
		/* P/Q left side optimization */
		for ( z = start-1 ; z >= 0 ; z-- ) {
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
			asm volatile("pxor %xmm13,%xmm13");
			asm volatile("pxor %xmm15,%xmm15");
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("pcmpgtb %xmm12,%xmm13");
			asm volatile("pcmpgtb %xmm14,%xmm15");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("paddb %xmm12,%xmm12");
			asm volatile("paddb %xmm14,%xmm14");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pand %xmm0,%xmm13");
			asm volatile("pand %xmm0,%xmm15");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm13,%xmm12");
			asm volatile("pxor %xmm15,%xmm14");
		}
		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
		asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
		asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
		asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
		asm volatile("pxor %0,%%xmm6" : : "m" (q[d+16]));
		asm volatile("pxor %0,%%xmm12" : : "m" (q[d+32]));
		asm volatile("pxor %0,%%xmm14" : : "m" (q[d+48]));
		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
		asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
		asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32]));
		asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

const struct raid6_calls raid6_sse2x4 = {
	raid6_sse24_gen_syndrome,
	raid6_sse24_xor_syndrome,
	raid6_have_sse2,
	"sse2x4",
	1			/* Has cache hints */
};

#endif /* CONFIG_X86_64 */