xor-neon.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/lib/xor-neon.c
 *
 * Authors: Jackie Liu <liuyun01@kylinos.cn>
 * Copyright (C) 2018,Tianjin KYLIN Information Technology Co., Ltd.
 */

#include <linux/raid/xor.h>
#include <linux/module.h>
#include <asm/neon-intrinsics.h>

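/*
 * XOR the contents of p2 into p1, 'bytes' bytes at a time. Each loop
 * iteration processes one 64-byte line as four 128-bit NEON vectors, so
 * 'bytes' is assumed to be a non-zero multiple of sizeof(uint64x2_t) * 4.
 * The 3-, 4- and 5-source variants below share the same structure.
 */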
void xor_arm64_neon_2(unsigned long bytes, unsigned long *p1,
	unsigned long *p2)
{
	uint64_t *dp1 = (uint64_t *)p1;
	uint64_t *dp2 = (uint64_t *)p2;

	register uint64x2_t v0, v1, v2, v3;
	long lines = bytes / (sizeof(uint64x2_t) * 4);

	do {
		/* p1 ^= p2 */
		v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0));
		v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2));
		v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4));
		v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6));

		/* store */
		vst1q_u64(dp1 + 0, v0);
		vst1q_u64(dp1 + 2, v1);
		vst1q_u64(dp1 + 4, v2);
		vst1q_u64(dp1 + 6, v3);

		dp1 += 8;
		dp2 += 8;
	} while (--lines > 0);
}

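/* As xor_arm64_neon_2(), but also folds in a third source buffer p3. */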
void xor_arm64_neon_3(unsigned long bytes, unsigned long *p1,
	unsigned long *p2, unsigned long *p3)
{
	uint64_t *dp1 = (uint64_t *)p1;
	uint64_t *dp2 = (uint64_t *)p2;
	uint64_t *dp3 = (uint64_t *)p3;

	register uint64x2_t v0, v1, v2, v3;
	long lines = bytes / (sizeof(uint64x2_t) * 4);

	do {
		/* p1 ^= p2 */
		v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0));
		v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2));
		v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4));
		v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6));

		/* p1 ^= p3 */
		v0 = veorq_u64(v0, vld1q_u64(dp3 + 0));
		v1 = veorq_u64(v1, vld1q_u64(dp3 + 2));
		v2 = veorq_u64(v2, vld1q_u64(dp3 + 4));
		v3 = veorq_u64(v3, vld1q_u64(dp3 + 6));

		/* store */
		vst1q_u64(dp1 + 0, v0);
		vst1q_u64(dp1 + 2, v1);
		vst1q_u64(dp1 + 4, v2);
		vst1q_u64(dp1 + 6, v3);

		dp1 += 8;
		dp2 += 8;
		dp3 += 8;
	} while (--lines > 0);
}

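/* As xor_arm64_neon_2(), but folds in the additional sources p3 and p4. */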
void xor_arm64_neon_4(unsigned long bytes, unsigned long *p1,
	unsigned long *p2, unsigned long *p3, unsigned long *p4)
{
	uint64_t *dp1 = (uint64_t *)p1;
	uint64_t *dp2 = (uint64_t *)p2;
	uint64_t *dp3 = (uint64_t *)p3;
	uint64_t *dp4 = (uint64_t *)p4;

	register uint64x2_t v0, v1, v2, v3;
	long lines = bytes / (sizeof(uint64x2_t) * 4);

	do {
		/* p1 ^= p2 */
		v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0));
		v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2));
		v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4));
		v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6));

		/* p1 ^= p3 */
		v0 = veorq_u64(v0, vld1q_u64(dp3 + 0));
		v1 = veorq_u64(v1, vld1q_u64(dp3 + 2));
		v2 = veorq_u64(v2, vld1q_u64(dp3 + 4));
		v3 = veorq_u64(v3, vld1q_u64(dp3 + 6));

		/* p1 ^= p4 */
		v0 = veorq_u64(v0, vld1q_u64(dp4 + 0));
		v1 = veorq_u64(v1, vld1q_u64(dp4 + 2));
		v2 = veorq_u64(v2, vld1q_u64(dp4 + 4));
		v3 = veorq_u64(v3, vld1q_u64(dp4 + 6));

		/* store */
		vst1q_u64(dp1 + 0, v0);
		vst1q_u64(dp1 + 2, v1);
		vst1q_u64(dp1 + 4, v2);
		vst1q_u64(dp1 + 6, v3);

		dp1 += 8;
		dp2 += 8;
		dp3 += 8;
		dp4 += 8;
	} while (--lines > 0);
}

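/* As xor_arm64_neon_2(), but folds in the additional sources p3, p4 and p5. */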
void xor_arm64_neon_5(unsigned long bytes, unsigned long *p1,
	unsigned long *p2, unsigned long *p3,
	unsigned long *p4, unsigned long *p5)
{
	uint64_t *dp1 = (uint64_t *)p1;
	uint64_t *dp2 = (uint64_t *)p2;
	uint64_t *dp3 = (uint64_t *)p3;
	uint64_t *dp4 = (uint64_t *)p4;
	uint64_t *dp5 = (uint64_t *)p5;

	register uint64x2_t v0, v1, v2, v3;
	long lines = bytes / (sizeof(uint64x2_t) * 4);

	do {
		/* p1 ^= p2 */
		v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0));
		v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2));
		v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4));
		v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6));

		/* p1 ^= p3 */
		v0 = veorq_u64(v0, vld1q_u64(dp3 + 0));
		v1 = veorq_u64(v1, vld1q_u64(dp3 + 2));
		v2 = veorq_u64(v2, vld1q_u64(dp3 + 4));
		v3 = veorq_u64(v3, vld1q_u64(dp3 + 6));

		/* p1 ^= p4 */
		v0 = veorq_u64(v0, vld1q_u64(dp4 + 0));
		v1 = veorq_u64(v1, vld1q_u64(dp4 + 2));
		v2 = veorq_u64(v2, vld1q_u64(dp4 + 4));
		v3 = veorq_u64(v3, vld1q_u64(dp4 + 6));

		/* p1 ^= p5 */
		v0 = veorq_u64(v0, vld1q_u64(dp5 + 0));
		v1 = veorq_u64(v1, vld1q_u64(dp5 + 2));
		v2 = veorq_u64(v2, vld1q_u64(dp5 + 4));
		v3 = veorq_u64(v3, vld1q_u64(dp5 + 6));

		/* store */
		vst1q_u64(dp1 + 0, v0);
		vst1q_u64(dp1 + 2, v1);
		vst1q_u64(dp1 + 4, v2);
		vst1q_u64(dp1 + 6, v3);

		dp1 += 8;
		dp2 += 8;
		dp3 += 8;
		dp4 += 8;
		dp5 += 8;
	} while (--lines > 0);
}

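/*
 * Template registered with the kernel's generic XOR framework. The XOR
 * calibration code benchmarks the templates exposed through <asm/xor.h>
 * and picks the fastest one for xor_blocks().
 */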
struct xor_block_template const xor_block_inner_neon = {
	.name	= "__inner_neon__",
	.do_2	= xor_arm64_neon_2,
	.do_3	= xor_arm64_neon_3,
	.do_4	= xor_arm64_neon_4,
	.do_5	= xor_arm64_neon_5,
};
EXPORT_SYMBOL(xor_block_inner_neon);

MODULE_AUTHOR("Jackie Liu <liuyun01@kylinos.cn>");
MODULE_DESCRIPTION("ARMv8 XOR Extensions");
MODULE_LICENSE("GPL");
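
/*
 * Usage sketch (illustrative only, not part of this file): these routines
 * use NEON registers, so a caller running in kernel context must claim the
 * NEON unit around the call, roughly as follows:
 *
 *	kernel_neon_begin();
 *	xor_block_inner_neon.do_2(bytes, p1, p2);
 *	kernel_neon_end();
 *
 * In mainline, the wrappers in arch/arm64/include/asm/xor.h do this before
 * dispatching to the do_2 ... do_5 hooks above.
 */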