recov_avx512.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Gayatri Kammela <gayatri.kammela@intel.com>
 * Author: Megha Dey <megha.dey@linux.intel.com>
 */

#ifdef CONFIG_AS_AVX512

#include <linux/raid/pq.h>
#include "x86.h"

static int raid6_has_avx512(void)
{
	return boot_cpu_has(X86_FEATURE_AVX2) &&
		boot_cpu_has(X86_FEATURE_AVX) &&
		boot_cpu_has(X86_FEATURE_AVX512F) &&
		boot_cpu_has(X86_FEATURE_AVX512BW) &&
		boot_cpu_has(X86_FEATURE_AVX512VL) &&
		boot_cpu_has(X86_FEATURE_AVX512DQ);
}
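
/*
 * The two recovery routines below are AVX-512 versions of the generic
 * byte-at-a-time code in lib/raid6/recov.c.  As a rough per-byte sketch
 * of the two-failure case (illustration only), the math being vectorized
 * is:
 *
 *	px = *p ^ *dp;			(P delta)
 *	qx = qmul[*q ^ *dq];		(scaled Q delta)
 *	*dq = db = pbmul[px] ^ qx;	(reconstructed block at failb)
 *	*dp = db ^ px;			(reconstructed block at faila)
 *
 * The SIMD versions perform the same table lookups 64 bytes at a time
 * with vpshufb, using 16-byte nibble tables from raid6_vgfmul.
 */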
static void raid6_2data_recov_avx512(int disks, size_t bytes, int faila,
				     int failb, void **ptrs)
{
	u8 *p, *q, *dp, *dq;
	const u8 *pbmul;	/* P multiplier table for B data */
	const u8 *qmul;		/* Q multiplier table (for both) */
	const u8 x0f = 0x0f;

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/*
	 * Compute syndrome with zero for the missing data pages
	 * Use the dead data pages as temporary storage for
	 * delta p and delta q
	 */
	dp = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-2] = dp;
	dq = (u8 *)ptrs[failb];
	ptrs[failb] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);

	/* Restore pointer table */
	ptrs[faila] = dp;
	ptrs[failb] = dq;
	ptrs[disks-2] = p;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
	qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
					raid6_gfexp[failb]]];
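
	/*
	 * In GF(2^8) terms, pbmul multiplies by 1/(g^(failb-faila) + 1) and
	 * qmul by 1/(g^faila + g^failb), so the block at failb comes out as
	 * pbmul[P delta] ^ qmul[Q delta] and the block at faila follows by
	 * one further xor with the P delta.
	 */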
	kernel_fpu_begin();

	/* zmm7 = x0f[64] */
	asm volatile("vpbroadcastb %0, %%zmm7" : : "m" (x0f));

	while (bytes) {
#ifdef CONFIG_X86_64
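		/*
		 * The 64-bit build can use zmm8-zmm31, so this path is
		 * unrolled two zmm registers wide: each iteration recovers
		 * 128 bytes, one register set covering bytes 0..63 and a
		 * second set covering bytes 64..127.
		 */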
		asm volatile("vmovdqa64 %0, %%zmm1\n\t"
			     "vmovdqa64 %1, %%zmm9\n\t"
			     "vmovdqa64 %2, %%zmm0\n\t"
			     "vmovdqa64 %3, %%zmm8\n\t"
			     "vpxorq %4, %%zmm1, %%zmm1\n\t"
			     "vpxorq %5, %%zmm9, %%zmm9\n\t"
			     "vpxorq %6, %%zmm0, %%zmm0\n\t"
			     "vpxorq %7, %%zmm8, %%zmm8"
			     :
			     : "m" (q[0]), "m" (q[64]), "m" (p[0]),
			       "m" (p[64]), "m" (dq[0]), "m" (dq[64]),
			       "m" (dp[0]), "m" (dp[64]));
		/*
		 * 1 = dq[0]  ^ q[0]
		 * 9 = dq[64] ^ q[64]
		 * 0 = dp[0]  ^ p[0]
		 * 8 = dp[64] ^ p[64]
		 */
		asm volatile("vbroadcasti64x2 %0, %%zmm4\n\t"
			     "vbroadcasti64x2 %1, %%zmm5"
			     :
			     : "m" (qmul[0]), "m" (qmul[16]));
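		/*
		 * zmm4/zmm5 hold the qmul low- and high-nibble lookup
		 * tables, broadcast to every 16-byte lane.  Each GF(2^8)
		 * multiply below is two vpshufb lookups (low nibble via
		 * zmm4, high nibble via zmm5) xored together.
		 */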
		asm volatile("vpsraw $4, %%zmm1, %%zmm3\n\t"
			     "vpsraw $4, %%zmm9, %%zmm12\n\t"
			     "vpandq %%zmm7, %%zmm1, %%zmm1\n\t"
			     "vpandq %%zmm7, %%zmm9, %%zmm9\n\t"
			     "vpandq %%zmm7, %%zmm3, %%zmm3\n\t"
			     "vpandq %%zmm7, %%zmm12, %%zmm12\n\t"
			     "vpshufb %%zmm9, %%zmm4, %%zmm14\n\t"
			     "vpshufb %%zmm1, %%zmm4, %%zmm4\n\t"
			     "vpshufb %%zmm12, %%zmm5, %%zmm15\n\t"
			     "vpshufb %%zmm3, %%zmm5, %%zmm5\n\t"
			     "vpxorq %%zmm14, %%zmm15, %%zmm15\n\t"
			     "vpxorq %%zmm4, %%zmm5, %%zmm5"
			     :
			     : );
		/*
		 * 5  = qx[0]
		 * 15 = qx[64]
		 */
		asm volatile("vbroadcasti64x2 %0, %%zmm4\n\t"
			     "vbroadcasti64x2 %1, %%zmm1\n\t"
			     "vpsraw $4, %%zmm0, %%zmm2\n\t"
			     "vpsraw $4, %%zmm8, %%zmm6\n\t"
			     "vpandq %%zmm7, %%zmm0, %%zmm3\n\t"
			     "vpandq %%zmm7, %%zmm8, %%zmm14\n\t"
			     "vpandq %%zmm7, %%zmm2, %%zmm2\n\t"
			     "vpandq %%zmm7, %%zmm6, %%zmm6\n\t"
			     "vpshufb %%zmm14, %%zmm4, %%zmm12\n\t"
			     "vpshufb %%zmm3, %%zmm4, %%zmm4\n\t"
			     "vpshufb %%zmm6, %%zmm1, %%zmm13\n\t"
			     "vpshufb %%zmm2, %%zmm1, %%zmm1\n\t"
			     "vpxorq %%zmm4, %%zmm1, %%zmm1\n\t"
			     "vpxorq %%zmm12, %%zmm13, %%zmm13"
			     :
			     : "m" (pbmul[0]), "m" (pbmul[16]));
		/*
		 * 1  = pbmul[px[0]]
		 * 13 = pbmul[px[64]]
		 */
		asm volatile("vpxorq %%zmm5, %%zmm1, %%zmm1\n\t"
			     "vpxorq %%zmm15, %%zmm13, %%zmm13"
			     :
			     : );
		/*
		 * 1  = db = DQ
		 * 13 = db[64] = DQ[64]
		 */
		asm volatile("vmovdqa64 %%zmm1, %0\n\t"
			     "vmovdqa64 %%zmm13,%1\n\t"
			     "vpxorq %%zmm1, %%zmm0, %%zmm0\n\t"
			     "vpxorq %%zmm13, %%zmm8, %%zmm8"
			     :
			     : "m" (dq[0]), "m" (dq[64]));
		asm volatile("vmovdqa64 %%zmm0, %0\n\t"
			     "vmovdqa64 %%zmm8, %1"
			     :
			     : "m" (dp[0]), "m" (dp[64]));

		bytes -= 128;
		p += 128;
		q += 128;
		dp += 128;
		dq += 128;
#else
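		/*
		 * 32-bit builds only have zmm0-zmm7 available, so recover a
		 * single 64-byte zmm lane per iteration instead of two.
		 */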
		asm volatile("vmovdqa64 %0, %%zmm1\n\t"
			     "vmovdqa64 %1, %%zmm0\n\t"
			     "vpxorq %2, %%zmm1, %%zmm1\n\t"
			     "vpxorq %3, %%zmm0, %%zmm0"
			     :
			     : "m" (*q), "m" (*p), "m" (*dq), "m" (*dp));

		/* 1 = dq ^ q;  0 = dp ^ p */
		asm volatile("vbroadcasti64x2 %0, %%zmm4\n\t"
			     "vbroadcasti64x2 %1, %%zmm5"
			     :
			     : "m" (qmul[0]), "m" (qmul[16]));
		/*
		 * 1 = dq ^ q
		 * 3 = (dq ^ q) >> 4
		 */
		asm volatile("vpsraw $4, %%zmm1, %%zmm3\n\t"
			     "vpandq %%zmm7, %%zmm1, %%zmm1\n\t"
			     "vpandq %%zmm7, %%zmm3, %%zmm3\n\t"
			     "vpshufb %%zmm1, %%zmm4, %%zmm4\n\t"
			     "vpshufb %%zmm3, %%zmm5, %%zmm5\n\t"
			     "vpxorq %%zmm4, %%zmm5, %%zmm5"
			     :
			     : );

		/* 5 = qx */
		asm volatile("vbroadcasti64x2 %0, %%zmm4\n\t"
			     "vbroadcasti64x2 %1, %%zmm1"
			     :
			     : "m" (pbmul[0]), "m" (pbmul[16]));
		asm volatile("vpsraw $4, %%zmm0, %%zmm2\n\t"
			     "vpandq %%zmm7, %%zmm0, %%zmm3\n\t"
			     "vpandq %%zmm7, %%zmm2, %%zmm2\n\t"
			     "vpshufb %%zmm3, %%zmm4, %%zmm4\n\t"
			     "vpshufb %%zmm2, %%zmm1, %%zmm1\n\t"
			     "vpxorq %%zmm4, %%zmm1, %%zmm1"
			     :
			     : );

		/* 1 = pbmul[px] */
		asm volatile("vpxorq %%zmm5, %%zmm1, %%zmm1\n\t"
			     /* 1 = db = DQ */
			     "vmovdqa64 %%zmm1, %0\n\t"
			     :
			     : "m" (dq[0]));
		asm volatile("vpxorq %%zmm1, %%zmm0, %%zmm0\n\t"
			     "vmovdqa64 %%zmm0, %0"
			     :
			     : "m" (dp[0]));

		bytes -= 64;
		p += 64;
		q += 64;
		dp += 64;
		dq += 64;
#endif
	}

	kernel_fpu_end();
}
static void raid6_datap_recov_avx512(int disks, size_t bytes, int faila,
				     void **ptrs)
{
	u8 *p, *q, *dq;
	const u8 *qmul;		/* Q multiplier table */
	const u8 x0f = 0x0f;

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/*
	 * Compute syndrome with zero for the missing data page
	 * Use the dead data page as temporary storage for delta q
	 */
	dq = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);

	/* Restore pointer table */
	ptrs[faila] = dq;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
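
	/*
	 * qmul multiplies by g^(-faila).  Per byte, the loop below computes
	 * *dq = qmul[*q ^ *dq] (the missing data block) and then repairs P
	 * with *p ^= *dq, since gen_syndrome() above left the partial P
	 * (missing block treated as zero) in the p page.
	 */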
	kernel_fpu_begin();

	asm volatile("vpbroadcastb %0, %%zmm7" : : "m" (x0f));

	while (bytes) {
#ifdef CONFIG_X86_64
		asm volatile("vmovdqa64 %0, %%zmm3\n\t"
			     "vmovdqa64 %1, %%zmm8\n\t"
			     "vpxorq %2, %%zmm3, %%zmm3\n\t"
			     "vpxorq %3, %%zmm8, %%zmm8"
			     :
			     : "m" (dq[0]), "m" (dq[64]), "m" (q[0]),
			       "m" (q[64]));
		/*
		 * 3 = q[0]  ^ dq[0]
		 * 8 = q[64] ^ dq[64]
		 */
		asm volatile("vbroadcasti64x2 %0, %%zmm0\n\t"
			     "vmovapd %%zmm0, %%zmm13\n\t"
			     "vbroadcasti64x2 %1, %%zmm1\n\t"
			     "vmovapd %%zmm1, %%zmm14"
			     :
			     : "m" (qmul[0]), "m" (qmul[16]));
		asm volatile("vpsraw $4, %%zmm3, %%zmm6\n\t"
			     "vpsraw $4, %%zmm8, %%zmm12\n\t"
			     "vpandq %%zmm7, %%zmm3, %%zmm3\n\t"
			     "vpandq %%zmm7, %%zmm8, %%zmm8\n\t"
			     "vpandq %%zmm7, %%zmm6, %%zmm6\n\t"
			     "vpandq %%zmm7, %%zmm12, %%zmm12\n\t"
			     "vpshufb %%zmm3, %%zmm0, %%zmm0\n\t"
			     "vpshufb %%zmm8, %%zmm13, %%zmm13\n\t"
			     "vpshufb %%zmm6, %%zmm1, %%zmm1\n\t"
			     "vpshufb %%zmm12, %%zmm14, %%zmm14\n\t"
			     "vpxorq %%zmm0, %%zmm1, %%zmm1\n\t"
			     "vpxorq %%zmm13, %%zmm14, %%zmm14"
			     :
			     : );
		/*
		 * 1  = qmul[q[0]  ^ dq[0]]
		 * 14 = qmul[q[64] ^ dq[64]]
		 */
		asm volatile("vmovdqa64 %0, %%zmm2\n\t"
			     "vmovdqa64 %1, %%zmm12\n\t"
			     "vpxorq %%zmm1, %%zmm2, %%zmm2\n\t"
			     "vpxorq %%zmm14, %%zmm12, %%zmm12"
			     :
			     : "m" (p[0]), "m" (p[64]));
		/*
		 * 2  = p[0]  ^ qmul[q[0]  ^ dq[0]]
		 * 12 = p[64] ^ qmul[q[64] ^ dq[64]]
		 */
		asm volatile("vmovdqa64 %%zmm1, %0\n\t"
			     "vmovdqa64 %%zmm14, %1\n\t"
			     "vmovdqa64 %%zmm2, %2\n\t"
			     "vmovdqa64 %%zmm12,%3"
			     :
			     : "m" (dq[0]), "m" (dq[64]), "m" (p[0]),
			       "m" (p[64]));

		bytes -= 128;
		p += 128;
		q += 128;
		dq += 128;
#else
		asm volatile("vmovdqa64 %0, %%zmm3\n\t"
			     "vpxorq %1, %%zmm3, %%zmm3"
			     :
			     : "m" (dq[0]), "m" (q[0]));

		/* 3 = q ^ dq */
		asm volatile("vbroadcasti64x2 %0, %%zmm0\n\t"
			     "vbroadcasti64x2 %1, %%zmm1"
			     :
			     : "m" (qmul[0]), "m" (qmul[16]));
		asm volatile("vpsraw $4, %%zmm3, %%zmm6\n\t"
			     "vpandq %%zmm7, %%zmm3, %%zmm3\n\t"
			     "vpandq %%zmm7, %%zmm6, %%zmm6\n\t"
			     "vpshufb %%zmm3, %%zmm0, %%zmm0\n\t"
			     "vpshufb %%zmm6, %%zmm1, %%zmm1\n\t"
			     "vpxorq %%zmm0, %%zmm1, %%zmm1"
			     :
			     : );

		/* 1 = qmul[q ^ dq] */
		asm volatile("vmovdqa64 %0, %%zmm2\n\t"
			     "vpxorq %%zmm1, %%zmm2, %%zmm2"
			     :
			     : "m" (p[0]));

		/* 2 = p ^ qmul[q ^ dq] */
		asm volatile("vmovdqa64 %%zmm1, %0\n\t"
			     "vmovdqa64 %%zmm2, %1"
			     :
			     : "m" (dq[0]), "m" (p[0]));

		bytes -= 64;
		p += 64;
		q += 64;
		dq += 64;
#endif
	}

	kernel_fpu_end();
}
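
/*
 * Descriptor registered with the RAID-6 recovery framework; the selection
 * code in lib/raid6/algos.c picks the valid entry with the highest
 * priority, so priority 3 here is meant to win over the AVX2 and SSSE3
 * recovery routines whenever raid6_has_avx512() passes.
 */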
const struct raid6_recov_calls raid6_recov_avx512 = {
	.data2 = raid6_2data_recov_avx512,
	.datap = raid6_datap_recov_avx512,
	.valid = raid6_has_avx512,
#ifdef CONFIG_X86_64
	.name = "avx512x2",
#else
	.name = "avx512x1",
#endif
	.priority = 3,
};

#else
#warning "your version of binutils lacks AVX512 support"
#endif