recov_neon_inner.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Intel Corporation
 * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <arm_neon.h>

#ifdef CONFIG_ARM
/*
 * AArch32 does not provide this intrinsic natively because it does not
 * implement the underlying instruction. AArch32 only provides a 64-bit
 * wide vtbl.8 instruction, so use that instead.
 */
static uint8x16_t vqtbl1q_u8(uint8x16_t a, uint8x16_t b)
{
        union {
                uint8x16_t      val;
                uint8x8x2_t     pair;
        } __a = { a };

        return vcombine_u8(vtbl2_u8(__a.pair, vget_low_u8(b)),
                           vtbl2_u8(__a.pair, vget_high_u8(b)));
}
#endif
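
/*
 * Illustrative addition, not part of the original file: both recovery
 * routines below multiply a whole vector by a GF(256) constant using a
 * 32-byte split-nibble lookup table, which the NEON loops index with
 * vqtbl1q_u8 (low-nibble products appear to sit in bytes 0..15 and
 * high-nibble products in bytes 16..31 of the tables passed in). A scalar
 * sketch of one such multiply, under a hypothetical name:
 */
static inline uint8_t raid6_gf_mul_split(const uint8_t *tbl, uint8_t x)
{
        /* tbl[i] = C * i and tbl[16 + i] = C * (i << 4); xor combines them */
        return tbl[x & 0x0f] ^ tbl[16 + (x >> 4)];
}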

void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp,
                              uint8_t *dq, const uint8_t *pbmul,
                              const uint8_t *qmul)
{
        uint8x16_t pm0 = vld1q_u8(pbmul);      /* low-nibble half of pbmul */
        uint8x16_t pm1 = vld1q_u8(pbmul + 16); /* high-nibble half of pbmul */
        uint8x16_t qm0 = vld1q_u8(qmul);       /* low-nibble half of qmul */
        uint8x16_t qm1 = vld1q_u8(qmul + 16);  /* high-nibble half of qmul */
        uint8x16_t x0f = vdupq_n_u8(0x0f);

        /*
         * while ( bytes-- ) {
         *      uint8_t px, qx, db;
         *
         *      px    = *p ^ *dp;
         *      qx    = qmul[*q ^ *dq];
         *      *dq++ = db = pbmul[px] ^ qx;
         *      *dp++ = db ^ px;
         *      p++; q++;
         * }
         */

        while (bytes) {
                uint8x16_t vx, vy, px, qx, db;

                /* px = *p ^ *dp */
                px = veorq_u8(vld1q_u8(p), vld1q_u8(dp));
                vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq));

                /* qx = qmul[*q ^ *dq]: table lookup on each nibble, xored */
                vy = vshrq_n_u8(vx, 4);
                vx = vqtbl1q_u8(qm0, vandq_u8(vx, x0f));
                vy = vqtbl1q_u8(qm1, vy);
                qx = veorq_u8(vx, vy);

                /* db = pbmul[px] ^ qx, same split-nibble lookup for pbmul */
                vy = vshrq_n_u8(px, 4);
                vx = vqtbl1q_u8(pm0, vandq_u8(px, x0f));
                vy = vqtbl1q_u8(pm1, vy);
                vx = veorq_u8(vx, vy);
                db = veorq_u8(vx, qx);

                /* *dq = db; *dp = db ^ px */
                vst1q_u8(dq, db);
                vst1q_u8(dp, veorq_u8(db, px));

                bytes -= 16;
                p += 16;
                q += 16;
                dp += 16;
                dq += 16;
        }
}
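
/*
 * Sketch only, not from the original source: a plain C reference of the
 * two-data recovery above, written with the hypothetical
 * raid6_gf_mul_split() helper. The NEON loop assumes bytes is a multiple
 * of 16; this scalar form has no such restriction.
 */
static inline void raid6_2data_recov_ref(int bytes, uint8_t *p, uint8_t *q,
                                         uint8_t *dp, uint8_t *dq,
                                         const uint8_t *pbmul,
                                         const uint8_t *qmul)
{
        while (bytes--) {
                uint8_t px = *p++ ^ *dp;
                uint8_t qx = raid6_gf_mul_split(qmul, *q++ ^ *dq);
                uint8_t db = raid6_gf_mul_split(pbmul, px) ^ qx;

                *dq++ = db;
                *dp++ = db ^ px;
        }
}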

void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq,
                              const uint8_t *qmul)
{
        uint8x16_t qm0 = vld1q_u8(qmul);      /* low-nibble half of qmul */
        uint8x16_t qm1 = vld1q_u8(qmul + 16); /* high-nibble half of qmul */
        uint8x16_t x0f = vdupq_n_u8(0x0f);

        /*
         * while (bytes--) {
         *      *p++ ^= *dq = qmul[*q ^ *dq];
         *      q++; dq++;
         * }
         */

        while (bytes) {
                uint8x16_t vx, vy;

                /* vx = qmul[*q ^ *dq]: split-nibble table lookups, xored */
                vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq));
                vy = vshrq_n_u8(vx, 4);
                vx = vqtbl1q_u8(qm0, vandq_u8(vx, x0f));
                vy = vqtbl1q_u8(qm1, vy);
                vx = veorq_u8(vx, vy);

                /* *dq = vx; *p ^= vx */
                vy = veorq_u8(vx, vld1q_u8(p));
                vst1q_u8(dq, vx);
                vst1q_u8(p, vy);

                bytes -= 16;
                p += 16;
                q += 16;
                dq += 16;
        }
}
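
/*
 * Sketch only, not from the original source: scalar reference of the
 * data+P recovery above, again using the hypothetical raid6_gf_mul_split()
 * helper in place of the NEON table lookups.
 */
static inline void raid6_datap_recov_ref(int bytes, uint8_t *p, uint8_t *q,
                                         uint8_t *dq, const uint8_t *qmul)
{
        while (bytes--) {
                *dq = raid6_gf_mul_split(qmul, *q++ ^ *dq);
                *p++ ^= *dq++;
        }
}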