avx512.c

// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- --------------------------------------------------------
 *
 *   Copyright (C) 2016 Intel Corporation
 *
 *   Author: Gayatri Kammela <gayatri.kammela@intel.com>
 *   Author: Megha Dey <megha.dey@linux.intel.com>
 *
 *   Based on avx2.c: Copyright 2012 Yuanhan Liu All Rights Reserved
 *   Based on sse2.c: Copyright 2002 H. Peter Anvin - All Rights Reserved
 *
 * -----------------------------------------------------------------------
 */

/*
 * AVX512 implementation of RAID-6 syndrome functions
 *
 */

#ifdef CONFIG_AS_AVX512

#include <linux/raid/pq.h>
#include "x86.h"

static const struct raid6_avx512_constants {
	u64 x1d[8];
} raid6_avx512_constants __aligned(512/8) = {
	{ 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,
	  0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,
	  0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,
	  0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,},
};
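
/*
 * Editorial note: the syndrome loops below evaluate Q with Horner's rule,
 * Q = (...((D_z0 * g ^ D_{z0-1}) * g ^ ...) * g ^ D_0, over GF(2^8) with
 * generator g = 2.  The vpcmpgtb/vpmovm2b/vpaddb/vpandq/vpxorq sequence is
 * a 64-byte-wide multiply-by-2 modulo the polynomial 0x11d.  The helper
 * below is an illustrative scalar sketch of that single step (hypothetical
 * name, not referenced by the SIMD routines in this file).
 */
static inline u64 raid6_sketch_gf_mul2(u64 v)
{
	/* bytes with the top bit set overflow and take the 0x1d reduction */
	u64 carry = (v & 0x8080808080808080ULL) >> 7;	/* 0x00/0x01 per byte */

	return ((v << 1) & 0xfefefefefefefefeULL) ^ (carry * 0x1d);
}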

static int raid6_have_avx512(void)
{
	return boot_cpu_has(X86_FEATURE_AVX2) &&
		boot_cpu_has(X86_FEATURE_AVX) &&
		boot_cpu_has(X86_FEATURE_AVX512F) &&
		boot_cpu_has(X86_FEATURE_AVX512BW) &&
		boot_cpu_has(X86_FEATURE_AVX512VL) &&
		boot_cpu_has(X86_FEATURE_AVX512DQ);
}
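
/*
 * Besides AVX512F, the byte-granularity compares into mask registers
 * (vpcmpgtb with a %k destination) and the mask-to-vector moves
 * (vpmovm2b) used below are AVX512BW forms, hence the BW check; the
 * AVX/AVX2 and VL/DQ bits are required by the test above as well.
 */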

static void raid6_avx5121_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("vmovdqa64 %0,%%zmm0\n\t"
		     "vpxorq %%zmm1,%%zmm1,%%zmm1" /* Zero temp */
		     :
		     : "m" (raid6_avx512_constants.x1d[0]));

	for (d = 0; d < bytes; d += 64) {
		asm volatile("prefetchnta %0\n\t"
			     "vmovdqa64 %0,%%zmm2\n\t"	/* P[0] */
			     "prefetchnta %1\n\t"
			     "vmovdqa64 %%zmm2,%%zmm4\n\t" /* Q[0] */
			     "vmovdqa64 %1,%%zmm6"
			     :
			     : "m" (dptr[z0][d]), "m" (dptr[z0-1][d]));
		for (z = z0-2; z >= 0; z--) {
			asm volatile("prefetchnta %0\n\t"
				     "vpcmpgtb %%zmm4,%%zmm1,%%k1\n\t"
				     "vpmovm2b %%k1,%%zmm5\n\t"
				     "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
				     "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
				     "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
				     "vpxorq %%zmm6,%%zmm2,%%zmm2\n\t"
				     "vpxorq %%zmm6,%%zmm4,%%zmm4\n\t"
				     "vmovdqa64 %0,%%zmm6"
				     :
				     : "m" (dptr[z][d]));
		}
		asm volatile("vpcmpgtb %%zmm4,%%zmm1,%%k1\n\t"
			     "vpmovm2b %%k1,%%zmm5\n\t"
			     "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
			     "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
			     "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
			     "vpxorq %%zmm6,%%zmm2,%%zmm2\n\t"
			     "vpxorq %%zmm6,%%zmm4,%%zmm4\n\t"
			     "vmovntdq %%zmm2,%0\n\t"
			     "vpxorq %%zmm2,%%zmm2,%%zmm2\n\t"
			     "vmovntdq %%zmm4,%1\n\t"
			     "vpxorq %%zmm4,%%zmm4,%%zmm4"
			     :
			     : "m" (p[d]), "m" (q[d]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}
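
/*
 * Editorial note: the computed P/Q blocks are written back with
 * non-temporal stores (vmovntdq) so freshly generated parity does not
 * displace cached data; the trailing sfence makes those weakly-ordered
 * stores globally visible before the FPU section ends.
 */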

static void raid6_avx5121_xor_syndrome(int disks, int start, int stop,
					size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = stop;		/* P/Q right side optimization */
	p = dptr[disks-2];	/* XOR parity */
	q = dptr[disks-1];	/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("vmovdqa64 %0,%%zmm0"
		     : : "m" (raid6_avx512_constants.x1d[0]));

	for (d = 0 ; d < bytes ; d += 64) {
		asm volatile("vmovdqa64 %0,%%zmm4\n\t"
			     "vmovdqa64 %1,%%zmm2\n\t"
			     "vpxorq %%zmm4,%%zmm2,%%zmm2"
			     :
			     : "m" (dptr[z0][d]), "m" (p[d]));
		/* P/Q data pages */
		for (z = z0-1 ; z >= start ; z--) {
			asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
				     "vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
				     "vpmovm2b %%k1,%%zmm5\n\t"
				     "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
				     "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
				     "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
				     "vmovdqa64 %0,%%zmm5\n\t"
				     "vpxorq %%zmm5,%%zmm2,%%zmm2\n\t"
				     "vpxorq %%zmm5,%%zmm4,%%zmm4"
				     :
				     : "m" (dptr[z][d]));
		}
		/* P/Q left side optimization */
		for (z = start-1 ; z >= 0 ; z--) {
			asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
				     "vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
				     "vpmovm2b %%k1,%%zmm5\n\t"
				     "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
				     "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
				     "vpxorq %%zmm5,%%zmm4,%%zmm4"
				     :
				     : );
		}
		asm volatile("vpxorq %0,%%zmm4,%%zmm4\n\t"
			     /* Don't use movntdq for r/w memory area < cache line */
			     "vmovdqa64 %%zmm4,%0\n\t"
			     "vmovdqa64 %%zmm2,%1"
			     :
			     : "m" (q[d]), "m" (p[d]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}
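
/*
 * Editorial note on the xor_syndrome() flow above: only the slots
 * start..stop are folded into the existing P/Q pages.  P simply gets
 * those data pages XORed into it.  For Q, the Horner accumulator must
 * still be multiplied by g once for every slot below start (the "left
 * side" loop, which touches no data), so that each folded page D_z
 * carries its full g^z weight before the delta is XORed into the
 * stored syndrome.
 */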

const struct raid6_calls raid6_avx512x1 = {
	raid6_avx5121_gen_syndrome,
	raid6_avx5121_xor_syndrome,
	raid6_have_avx512,
	"avx512x1",
	1			/* Has cache hints */
};
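
/*
 * Editorial sketch (hypothetical helper raid6_sketch_run, not part of the
 * original file): how a raid6_calls entry such as raid6_avx512x1 is driven.
 * ptrs[] holds the data pages first, then the P page, then the Q page; all
 * pages are assumed 64-byte aligned and bytes a multiple of the unroll
 * width (64 here, 128/256 for the x2/x4 variants below).
 */
static inline void raid6_sketch_run(int disks, size_t bytes, void **ptrs)
{
	/* ptrs[0..disks-3] = data, ptrs[disks-2] = P, ptrs[disks-1] = Q */
	if (raid6_avx512x1.valid())
		raid6_avx512x1.gen_syndrome(disks, bytes, ptrs);
}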

/*
 * Unrolled-by-2 AVX512 implementation
 */

static void raid6_avx5122_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("vmovdqa64 %0,%%zmm0\n\t"
		     "vpxorq %%zmm1,%%zmm1,%%zmm1" /* Zero temp */
		     :
		     : "m" (raid6_avx512_constants.x1d[0]));

	/* We uniformly assume a single prefetch covers at least 64 bytes */
	for (d = 0; d < bytes; d += 128) {
		asm volatile("prefetchnta %0\n\t"
			     "prefetchnta %1\n\t"
			     "vmovdqa64 %0,%%zmm2\n\t"	/* P[0] */
			     "vmovdqa64 %1,%%zmm3\n\t"	/* P[1] */
			     "vmovdqa64 %%zmm2,%%zmm4\n\t" /* Q[0] */
			     "vmovdqa64 %%zmm3,%%zmm6"	/* Q[1] */
			     :
			     : "m" (dptr[z0][d]), "m" (dptr[z0][d+64]));
		for (z = z0-1; z >= 0; z--) {
			asm volatile("prefetchnta %0\n\t"
				     "prefetchnta %1\n\t"
				     "vpcmpgtb %%zmm4,%%zmm1,%%k1\n\t"
				     "vpcmpgtb %%zmm6,%%zmm1,%%k2\n\t"
				     "vpmovm2b %%k1,%%zmm5\n\t"
				     "vpmovm2b %%k2,%%zmm7\n\t"
				     "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
				     "vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
				     "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
				     "vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
				     "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
				     "vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
				     "vmovdqa64 %0,%%zmm5\n\t"
				     "vmovdqa64 %1,%%zmm7\n\t"
				     "vpxorq %%zmm5,%%zmm2,%%zmm2\n\t"
				     "vpxorq %%zmm7,%%zmm3,%%zmm3\n\t"
				     "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
				     "vpxorq %%zmm7,%%zmm6,%%zmm6"
				     :
				     : "m" (dptr[z][d]), "m" (dptr[z][d+64]));
		}
		asm volatile("vmovntdq %%zmm2,%0\n\t"
			     "vmovntdq %%zmm3,%1\n\t"
			     "vmovntdq %%zmm4,%2\n\t"
			     "vmovntdq %%zmm6,%3"
			     :
			     : "m" (p[d]), "m" (p[d+64]), "m" (q[d]),
			       "m" (q[d+64]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

static void raid6_avx5122_xor_syndrome(int disks, int start, int stop,
					size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = stop;		/* P/Q right side optimization */
	p = dptr[disks-2];	/* XOR parity */
	q = dptr[disks-1];	/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("vmovdqa64 %0,%%zmm0"
		     : : "m" (raid6_avx512_constants.x1d[0]));

	for (d = 0 ; d < bytes ; d += 128) {
		asm volatile("vmovdqa64 %0,%%zmm4\n\t"
			     "vmovdqa64 %1,%%zmm6\n\t"
			     "vmovdqa64 %2,%%zmm2\n\t"
			     "vmovdqa64 %3,%%zmm3\n\t"
			     "vpxorq %%zmm4,%%zmm2,%%zmm2\n\t"
			     "vpxorq %%zmm6,%%zmm3,%%zmm3"
			     :
			     : "m" (dptr[z0][d]), "m" (dptr[z0][d+64]),
			       "m" (p[d]), "m" (p[d+64]));
		/* P/Q data pages */
		for (z = z0-1 ; z >= start ; z--) {
			asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
				     "vpxorq %%zmm7,%%zmm7,%%zmm7\n\t"
				     "vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
				     "vpcmpgtb %%zmm6,%%zmm7,%%k2\n\t"
				     "vpmovm2b %%k1,%%zmm5\n\t"
				     "vpmovm2b %%k2,%%zmm7\n\t"
				     "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
				     "vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
				     "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
				     "vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
				     "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
				     "vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
				     "vmovdqa64 %0,%%zmm5\n\t"
				     "vmovdqa64 %1,%%zmm7\n\t"
				     "vpxorq %%zmm5,%%zmm2,%%zmm2\n\t"
				     "vpxorq %%zmm7,%%zmm3,%%zmm3\n\t"
				     "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
				     "vpxorq %%zmm7,%%zmm6,%%zmm6"
				     :
				     : "m" (dptr[z][d]), "m" (dptr[z][d+64]));
		}
		/* P/Q left side optimization */
		for (z = start-1 ; z >= 0 ; z--) {
			asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
				     "vpxorq %%zmm7,%%zmm7,%%zmm7\n\t"
				     "vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
				     "vpcmpgtb %%zmm6,%%zmm7,%%k2\n\t"
				     "vpmovm2b %%k1,%%zmm5\n\t"
				     "vpmovm2b %%k2,%%zmm7\n\t"
				     "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
				     "vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
				     "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
				     "vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
				     "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
				     "vpxorq %%zmm7,%%zmm6,%%zmm6"
				     :
				     : );
		}
		asm volatile("vpxorq %0,%%zmm4,%%zmm4\n\t"
			     "vpxorq %1,%%zmm6,%%zmm6\n\t"
			     /*
			      * Don't use movntdq for r/w
			      * memory area < cache line
			      */
			     "vmovdqa64 %%zmm4,%0\n\t"
			     "vmovdqa64 %%zmm6,%1\n\t"
			     "vmovdqa64 %%zmm2,%2\n\t"
			     "vmovdqa64 %%zmm3,%3"
			     :
			     : "m" (q[d]), "m" (q[d+64]), "m" (p[d]),
			       "m" (p[d+64]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

const struct raid6_calls raid6_avx512x2 = {
	raid6_avx5122_gen_syndrome,
	raid6_avx5122_xor_syndrome,
	raid6_have_avx512,
	"avx512x2",
	1			/* Has cache hints */
};

#ifdef CONFIG_X86_64

/*
 * Unrolled-by-4 AVX512 implementation
 */
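
/*
 * Editorial note: this variant keeps four P streams (zmm2/3/10/11) and
 * four Q streams (zmm4/6/12/14) live at once; zmm8-zmm15 are only
 * available in 64-bit mode, hence the CONFIG_X86_64 guard.
 */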

static void raid6_avx5124_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("vmovdqa64 %0,%%zmm0\n\t"
		     "vpxorq %%zmm1,%%zmm1,%%zmm1\n\t"	/* Zero temp */
		     "vpxorq %%zmm2,%%zmm2,%%zmm2\n\t"	/* P[0] */
		     "vpxorq %%zmm3,%%zmm3,%%zmm3\n\t"	/* P[1] */
		     "vpxorq %%zmm4,%%zmm4,%%zmm4\n\t"	/* Q[0] */
		     "vpxorq %%zmm6,%%zmm6,%%zmm6\n\t"	/* Q[1] */
		     "vpxorq %%zmm10,%%zmm10,%%zmm10\n\t" /* P[2] */
		     "vpxorq %%zmm11,%%zmm11,%%zmm11\n\t" /* P[3] */
		     "vpxorq %%zmm12,%%zmm12,%%zmm12\n\t" /* Q[2] */
		     "vpxorq %%zmm14,%%zmm14,%%zmm14"	/* Q[3] */
		     :
		     : "m" (raid6_avx512_constants.x1d[0]));

	for (d = 0; d < bytes; d += 256) {
		for (z = z0; z >= 0; z--) {
			asm volatile("prefetchnta %0\n\t"
				     "prefetchnta %1\n\t"
				     "prefetchnta %2\n\t"
				     "prefetchnta %3\n\t"
				     "vpcmpgtb %%zmm4,%%zmm1,%%k1\n\t"
				     "vpcmpgtb %%zmm6,%%zmm1,%%k2\n\t"
				     "vpcmpgtb %%zmm12,%%zmm1,%%k3\n\t"
				     "vpcmpgtb %%zmm14,%%zmm1,%%k4\n\t"
				     "vpmovm2b %%k1,%%zmm5\n\t"
				     "vpmovm2b %%k2,%%zmm7\n\t"
				     "vpmovm2b %%k3,%%zmm13\n\t"
				     "vpmovm2b %%k4,%%zmm15\n\t"
				     "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
				     "vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
				     "vpaddb %%zmm12,%%zmm12,%%zmm12\n\t"
				     "vpaddb %%zmm14,%%zmm14,%%zmm14\n\t"
				     "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
				     "vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
				     "vpandq %%zmm0,%%zmm13,%%zmm13\n\t"
				     "vpandq %%zmm0,%%zmm15,%%zmm15\n\t"
				     "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
				     "vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
				     "vpxorq %%zmm13,%%zmm12,%%zmm12\n\t"
				     "vpxorq %%zmm15,%%zmm14,%%zmm14\n\t"
				     "vmovdqa64 %0,%%zmm5\n\t"
				     "vmovdqa64 %1,%%zmm7\n\t"
				     "vmovdqa64 %2,%%zmm13\n\t"
				     "vmovdqa64 %3,%%zmm15\n\t"
				     "vpxorq %%zmm5,%%zmm2,%%zmm2\n\t"
				     "vpxorq %%zmm7,%%zmm3,%%zmm3\n\t"
				     "vpxorq %%zmm13,%%zmm10,%%zmm10\n\t"
				     "vpxorq %%zmm15,%%zmm11,%%zmm11\n\t"
				     "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
				     "vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
				     "vpxorq %%zmm13,%%zmm12,%%zmm12\n\t"
				     "vpxorq %%zmm15,%%zmm14,%%zmm14"
				     :
				     : "m" (dptr[z][d]), "m" (dptr[z][d+64]),
				       "m" (dptr[z][d+128]), "m" (dptr[z][d+192]));
		}
		asm volatile("vmovntdq %%zmm2,%0\n\t"
			     "vpxorq %%zmm2,%%zmm2,%%zmm2\n\t"
			     "vmovntdq %%zmm3,%1\n\t"
			     "vpxorq %%zmm3,%%zmm3,%%zmm3\n\t"
			     "vmovntdq %%zmm10,%2\n\t"
			     "vpxorq %%zmm10,%%zmm10,%%zmm10\n\t"
			     "vmovntdq %%zmm11,%3\n\t"
			     "vpxorq %%zmm11,%%zmm11,%%zmm11\n\t"
			     "vmovntdq %%zmm4,%4\n\t"
			     "vpxorq %%zmm4,%%zmm4,%%zmm4\n\t"
			     "vmovntdq %%zmm6,%5\n\t"
			     "vpxorq %%zmm6,%%zmm6,%%zmm6\n\t"
			     "vmovntdq %%zmm12,%6\n\t"
			     "vpxorq %%zmm12,%%zmm12,%%zmm12\n\t"
			     "vmovntdq %%zmm14,%7\n\t"
			     "vpxorq %%zmm14,%%zmm14,%%zmm14"
			     :
			     : "m" (p[d]), "m" (p[d+64]), "m" (p[d+128]),
			       "m" (p[d+192]), "m" (q[d]), "m" (q[d+64]),
			       "m" (q[d+128]), "m" (q[d+192]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

static void raid6_avx5124_xor_syndrome(int disks, int start, int stop,
					size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = stop;		/* P/Q right side optimization */
	p = dptr[disks-2];	/* XOR parity */
	q = dptr[disks-1];	/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("vmovdqa64 %0,%%zmm0"
		     :: "m" (raid6_avx512_constants.x1d[0]));

	for (d = 0 ; d < bytes ; d += 256) {
		asm volatile("vmovdqa64 %0,%%zmm4\n\t"
			     "vmovdqa64 %1,%%zmm6\n\t"
			     "vmovdqa64 %2,%%zmm12\n\t"
			     "vmovdqa64 %3,%%zmm14\n\t"
			     "vmovdqa64 %4,%%zmm2\n\t"
			     "vmovdqa64 %5,%%zmm3\n\t"
			     "vmovdqa64 %6,%%zmm10\n\t"
			     "vmovdqa64 %7,%%zmm11\n\t"
			     "vpxorq %%zmm4,%%zmm2,%%zmm2\n\t"
			     "vpxorq %%zmm6,%%zmm3,%%zmm3\n\t"
			     "vpxorq %%zmm12,%%zmm10,%%zmm10\n\t"
			     "vpxorq %%zmm14,%%zmm11,%%zmm11"
			     :
			     : "m" (dptr[z0][d]), "m" (dptr[z0][d+64]),
			       "m" (dptr[z0][d+128]), "m" (dptr[z0][d+192]),
			       "m" (p[d]), "m" (p[d+64]), "m" (p[d+128]),
			       "m" (p[d+192]));
		/* P/Q data pages */
		for (z = z0-1 ; z >= start ; z--) {
			asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
				     "vpxorq %%zmm7,%%zmm7,%%zmm7\n\t"
				     "vpxorq %%zmm13,%%zmm13,%%zmm13\n\t"
				     "vpxorq %%zmm15,%%zmm15,%%zmm15\n\t"
				     "prefetchnta %0\n\t"
				     "prefetchnta %2\n\t"
				     "vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
				     "vpcmpgtb %%zmm6,%%zmm7,%%k2\n\t"
				     "vpcmpgtb %%zmm12,%%zmm13,%%k3\n\t"
				     "vpcmpgtb %%zmm14,%%zmm15,%%k4\n\t"
				     "vpmovm2b %%k1,%%zmm5\n\t"
				     "vpmovm2b %%k2,%%zmm7\n\t"
				     "vpmovm2b %%k3,%%zmm13\n\t"
				     "vpmovm2b %%k4,%%zmm15\n\t"
				     "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
				     "vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
				     "vpaddb %%zmm12,%%zmm12,%%zmm12\n\t"
				     "vpaddb %%zmm14,%%zmm14,%%zmm14\n\t"
				     "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
				     "vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
				     "vpandq %%zmm0,%%zmm13,%%zmm13\n\t"
				     "vpandq %%zmm0,%%zmm15,%%zmm15\n\t"
				     "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
				     "vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
				     "vpxorq %%zmm13,%%zmm12,%%zmm12\n\t"
				     "vpxorq %%zmm15,%%zmm14,%%zmm14\n\t"
				     "vmovdqa64 %0,%%zmm5\n\t"
				     "vmovdqa64 %1,%%zmm7\n\t"
				     "vmovdqa64 %2,%%zmm13\n\t"
				     "vmovdqa64 %3,%%zmm15\n\t"
				     "vpxorq %%zmm5,%%zmm2,%%zmm2\n\t"
				     "vpxorq %%zmm7,%%zmm3,%%zmm3\n\t"
				     "vpxorq %%zmm13,%%zmm10,%%zmm10\n\t"
				     "vpxorq %%zmm15,%%zmm11,%%zmm11\n\t"
				     "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
				     "vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
				     "vpxorq %%zmm13,%%zmm12,%%zmm12\n\t"
				     "vpxorq %%zmm15,%%zmm14,%%zmm14"
				     :
				     : "m" (dptr[z][d]), "m" (dptr[z][d+64]),
				       "m" (dptr[z][d+128]),
				       "m" (dptr[z][d+192]));
		}
		asm volatile("prefetchnta %0\n\t"
			     "prefetchnta %1\n\t"
			     :
			     : "m" (q[d]), "m" (q[d+128]));
		/* P/Q left side optimization */
		for (z = start-1 ; z >= 0 ; z--) {
			asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
				     "vpxorq %%zmm7,%%zmm7,%%zmm7\n\t"
				     "vpxorq %%zmm13,%%zmm13,%%zmm13\n\t"
				     "vpxorq %%zmm15,%%zmm15,%%zmm15\n\t"
				     "vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
				     "vpcmpgtb %%zmm6,%%zmm7,%%k2\n\t"
				     "vpcmpgtb %%zmm12,%%zmm13,%%k3\n\t"
				     "vpcmpgtb %%zmm14,%%zmm15,%%k4\n\t"
				     "vpmovm2b %%k1,%%zmm5\n\t"
				     "vpmovm2b %%k2,%%zmm7\n\t"
				     "vpmovm2b %%k3,%%zmm13\n\t"
				     "vpmovm2b %%k4,%%zmm15\n\t"
				     "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
				     "vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
				     "vpaddb %%zmm12,%%zmm12,%%zmm12\n\t"
				     "vpaddb %%zmm14,%%zmm14,%%zmm14\n\t"
				     "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
				     "vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
				     "vpandq %%zmm0,%%zmm13,%%zmm13\n\t"
				     "vpandq %%zmm0,%%zmm15,%%zmm15\n\t"
				     "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
				     "vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
				     "vpxorq %%zmm13,%%zmm12,%%zmm12\n\t"
				     "vpxorq %%zmm15,%%zmm14,%%zmm14"
				     :
				     : );
		}
		asm volatile("vmovntdq %%zmm2,%0\n\t"
			     "vmovntdq %%zmm3,%1\n\t"
			     "vmovntdq %%zmm10,%2\n\t"
			     "vmovntdq %%zmm11,%3\n\t"
			     "vpxorq %4,%%zmm4,%%zmm4\n\t"
			     "vpxorq %5,%%zmm6,%%zmm6\n\t"
			     "vpxorq %6,%%zmm12,%%zmm12\n\t"
			     "vpxorq %7,%%zmm14,%%zmm14\n\t"
			     "vmovntdq %%zmm4,%4\n\t"
			     "vmovntdq %%zmm6,%5\n\t"
			     "vmovntdq %%zmm12,%6\n\t"
			     "vmovntdq %%zmm14,%7"
			     :
			     : "m" (p[d]), "m" (p[d+64]), "m" (p[d+128]),
			       "m" (p[d+192]), "m" (q[d]), "m" (q[d+64]),
			       "m" (q[d+128]), "m" (q[d+192]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

const struct raid6_calls raid6_avx512x4 = {
	raid6_avx5124_gen_syndrome,
	raid6_avx5124_xor_syndrome,
	raid6_have_avx512,
	"avx512x4",
	1			/* Has cache hints */
};

#endif
#endif /* CONFIG_AS_AVX512 */