ghash-ce-core.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Accelerated GHASH implementation with NEON/ARMv8 vmull.p8/64 instructions.
 *
 * Copyright (C) 2015 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

	.arch		armv8-a
	.fpu		crypto-neon-fp-armv8
	SHASH		.req	q0
	T1		.req	q1
	XL		.req	q2
	XM		.req	q3
	XH		.req	q4
	IN1		.req	q4

	SHASH_L		.req	d0
	SHASH_H		.req	d1
	T1_L		.req	d2
	T1_H		.req	d3
	XL_L		.req	d4
	XL_H		.req	d5
	XM_L		.req	d6
	XM_H		.req	d7
	XH_L		.req	d8

	t0l		.req	d10
	t0h		.req	d11
	t1l		.req	d12
	t1h		.req	d13
	t2l		.req	d14
	t2h		.req	d15
	t3l		.req	d16
	t3h		.req	d17
	t4l		.req	d18
	t4h		.req	d19

	t0q		.req	q5
	t1q		.req	q6
	t2q		.req	q7
	t3q		.req	q8
	t4q		.req	q9
	T2		.req	q9

	s1l		.req	d20
	s1h		.req	d21
	s2l		.req	d22
	s2h		.req	d23
	s3l		.req	d24
	s3h		.req	d25
	s4l		.req	d26
	s4h		.req	d27

	MASK		.req	d28
	SHASH2_p8	.req	d28

	k16		.req	d29
	k32		.req	d30
	k48		.req	d31
	SHASH2_p64	.req	d31

	HH		.req	q10
	HH3		.req	q11
	HH4		.req	q12
	HH34		.req	q13

	HH_L		.req	d20
	HH_H		.req	d21
	HH3_L		.req	d22
	HH3_H		.req	d23
	HH4_L		.req	d24
	HH4_H		.req	d25
	HH34_L		.req	d26
	HH34_H		.req	d27
	SHASH2_H	.req	d29

	XL2		.req	q5
	XM2		.req	q6
	XH2		.req	q7
	T3		.req	q8

	XL2_L		.req	d10
	XL2_H		.req	d11
	XM2_L		.req	d12
	XM2_H		.req	d13
	T3_L		.req	d16
	T3_H		.req	d17
	.text

	.macro		__pmull_p64, rd, rn, rm, b1, b2, b3, b4
	vmull.p64	\rd, \rn, \rm
	.endm
	/*
	 * This implementation of 64x64 -> 128 bit polynomial multiplication
	 * using vmull.p8 instructions (8x8 -> 16) is taken from the paper
	 * "Fast Software Polynomial Multiplication on ARM Processors Using
	 * the NEON Engine" by Danilo Camara, Conrado Gouvea, Julio Lopez and
	 * Ricardo Dahab (https://hal.inria.fr/hal-01506572)
	 *
	 * It has been slightly tweaked for in-order performance, and to allow
	 * 'rq' to overlap with 'ad' or 'bd'.
	 */
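	/*
	 * Rough sketch of the scheme (paraphrasing the paper above):
	 * vmull.p8 performs eight independent 8x8 -> 16 bit carryless
	 * multiplies.  The macro therefore also multiplies byte-rotated
	 * copies of 'ad' (A1..A3) and 'bd' (B1..B4), folds each pair of
	 * partial products (L = E+F, M = G+H, N = I+J, plus K and the
	 * aligned product D = A*B), and reassembles the result as roughly
	 *
	 *	A*B = D ^ (L << 8) ^ (M << 16) ^ (N << 24) ^ (K << 32)
	 *
	 * The k48/k32/k16 constants mask off the bits of each partial
	 * product that would otherwise wrap around, and the vext.8
	 * instructions perform the byte shifts.
	 */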
	.macro		__pmull_p8, rq, ad, bd, b1=t4l, b2=t3l, b3=t4l, b4=t3l
	vext.8		t0l, \ad, \ad, #1	@ A1
	.ifc		\b1, t4l
	vext.8		t4l, \bd, \bd, #1	@ B1
	.endif
	vmull.p8	t0q, t0l, \bd		@ F = A1*B
	vext.8		t1l, \ad, \ad, #2	@ A2
	vmull.p8	t4q, \ad, \b1		@ E = A*B1
	.ifc		\b2, t3l
	vext.8		t3l, \bd, \bd, #2	@ B2
	.endif
	vmull.p8	t1q, t1l, \bd		@ H = A2*B
	vext.8		t2l, \ad, \ad, #3	@ A3
	vmull.p8	t3q, \ad, \b2		@ G = A*B2
	veor		t0q, t0q, t4q		@ L = E + F
	.ifc		\b3, t4l
	vext.8		t4l, \bd, \bd, #3	@ B3
	.endif
	vmull.p8	t2q, t2l, \bd		@ J = A3*B
	veor		t0l, t0l, t0h		@ t0 = (L) (P0 + P1) << 8
	veor		t1q, t1q, t3q		@ M = G + H
	.ifc		\b4, t3l
	vext.8		t3l, \bd, \bd, #4	@ B4
	.endif
	vmull.p8	t4q, \ad, \b3		@ I = A*B3
	veor		t1l, t1l, t1h		@ t1 = (M) (P2 + P3) << 16
	vmull.p8	t3q, \ad, \b4		@ K = A*B4
	vand		t0h, t0h, k48
	vand		t1h, t1h, k32
	veor		t2q, t2q, t4q		@ N = I + J
	veor		t0l, t0l, t0h
	veor		t1l, t1l, t1h
	veor		t2l, t2l, t2h		@ t2 = (N) (P4 + P5) << 24
	vand		t2h, t2h, k16
	veor		t3l, t3l, t3h		@ t3 = (K) (P6 + P7) << 32
	vmov.i64	t3h, #0
	vext.8		t0q, t0q, t0q, #15
	veor		t2l, t2l, t2h
	vext.8		t1q, t1q, t1q, #14
	vmull.p8	\rq, \ad, \bd		@ D = A*B
	vext.8		t2q, t2q, t2q, #13
	vext.8		t3q, t3q, t3q, #12
	veor		t0q, t0q, t1q
	veor		t2q, t2q, t3q
	veor		\rq, \rq, t0q
	veor		\rq, \rq, t2q
	.endm
	//
	// PMULL (64x64 -> 128) based reduction for CPUs that can do
	// it in a single instruction.
	//
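	//
	// The 256-bit product produced above (high half in XH, low half in
	// XL, middle Karatsuba term in XM) is folded back to 128 bits
	// modulo the GHASH polynomial x^128 + x^7 + x^2 + x + 1.  In the
	// bit-reflected representation used by GHASH this amounts to two
	// carryless multiplications by the constant held in MASK
	// (0xe1 << 57, as set up in pmull_ghash_update_p64 below).
	//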
	.macro		__pmull_reduce_p64
	vmull.p64	T1, XL_L, MASK

	veor		XH_L, XH_L, XM_H
	vext.8		T1, T1, T1, #8
	veor		XL_H, XL_H, XM_L
	veor		T1, T1, XL

	vmull.p64	XL, T1_H, MASK
	.endm
	//
	// Alternative reduction for CPUs that lack support for the
	// 64x64 -> 128 PMULL instruction
	//
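	//
	// Since vmull.p64 is unavailable here, the multiplications by the
	// reduction constant are expanded by hand: the same folding is
	// obtained by XOR-ing together copies of the operand shifted left
	// by 57, 62 and 63 bits and (via the vshr steps) right by 1, 2 and
	// 7 bits, which is what the shift/XOR chain below implements.
	//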
	.macro		__pmull_reduce_p8
	veor		XL_H, XL_H, XM_L
	veor		XH_L, XH_L, XM_H

	vshl.i64	T1, XL, #57
	vshl.i64	T2, XL, #62
	veor		T1, T1, T2
	vshl.i64	T2, XL, #63
	veor		T1, T1, T2
	veor		XL_H, XL_H, T1_L
	veor		XH_L, XH_L, T1_H

	vshr.u64	T1, XL, #1
	veor		XH, XH, XL
	veor		XL, XL, T1
	vshr.u64	T1, T1, #6
	vshr.u64	XL, XL, #1
	.endm
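	//
	// ghash_update expects the arguments of pmull_ghash_update (see the
	// prototype further down) in the AAPCS argument registers:
	// r0 = number of blocks, r1 = digest, r2 = source, r3 = key, with
	// the optional head block pointer passed on the stack.
	//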
	.macro		ghash_update, pn
	vld1.64		{XL}, [r1]

	/* do the head block first, if supplied */
	ldr		ip, [sp]
	teq		ip, #0
	beq		0f
	vld1.64		{T1}, [ip]
	teq		r0, #0
	b		3f

0:	.ifc		\pn, p64
	tst		r0, #3			// skip until #blocks is a
	bne		2f			// round multiple of 4
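	//
	// 4-way aggregated processing: each pass of the loop below loads
	// four blocks and multiplies them by HH4, HH3, HH and SHASH
	// respectively (successive powers of the hash key), accumulating
	// the unreduced products in XL/XM/XH so that only one reduction is
	// needed per four blocks of input.
	//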
	vld1.8		{XL2-XM2}, [r2]!
1:	vld1.8		{T3-T2}, [r2]!
	vrev64.8	XL2, XL2
	vrev64.8	XM2, XM2

	subs		r0, r0, #4

	vext.8		T1, XL2, XL2, #8
	veor		XL2_H, XL2_H, XL_L
	veor		XL, XL, T1

	vrev64.8	T3, T3
	vrev64.8	T1, T2

	vmull.p64	XH, HH4_H, XL_H			// a1 * b1
	veor		XL2_H, XL2_H, XL_H
	vmull.p64	XL, HH4_L, XL_L			// a0 * b0
	vmull.p64	XM, HH34_H, XL2_H		// (a1 + a0)(b1 + b0)

	vmull.p64	XH2, HH3_H, XM2_L		// a1 * b1
	veor		XM2_L, XM2_L, XM2_H
	vmull.p64	XL2, HH3_L, XM2_H		// a0 * b0
	vmull.p64	XM2, HH34_L, XM2_L		// (a1 + a0)(b1 + b0)

	veor		XH, XH, XH2
	veor		XL, XL, XL2
	veor		XM, XM, XM2

	vmull.p64	XH2, HH_H, T3_L			// a1 * b1
	veor		T3_L, T3_L, T3_H
	vmull.p64	XL2, HH_L, T3_H			// a0 * b0
	vmull.p64	XM2, SHASH2_H, T3_L		// (a1 + a0)(b1 + b0)

	veor		XH, XH, XH2
	veor		XL, XL, XL2
	veor		XM, XM, XM2

	vmull.p64	XH2, SHASH_H, T1_L		// a1 * b1
	veor		T1_L, T1_L, T1_H
	vmull.p64	XL2, SHASH_L, T1_H		// a0 * b0
	vmull.p64	XM2, SHASH2_p64, T1_L		// (a1 + a0)(b1 + b0)

	veor		XH, XH, XH2
	veor		XL, XL, XL2
	veor		XM, XM, XM2

	beq		4f

	vld1.8		{XL2-XM2}, [r2]!

	veor		T1, XL, XH
	veor		XM, XM, T1

	__pmull_reduce_p64

	veor		T1, T1, XH
	veor		XL, XL, T1

	b		1b
	.endif
2:	vld1.64		{T1}, [r2]!
	subs		r0, r0, #1

3:	/* multiply XL by SHASH in GF(2^128) */
#ifndef CONFIG_CPU_BIG_ENDIAN
	vrev64.8	T1, T1
#endif
	vext.8		IN1, T1, T1, #8
	veor		T1_L, T1_L, XL_H
	veor		XL, XL, IN1

	__pmull_\pn	XH, XL_H, SHASH_H, s1h, s2h, s3h, s4h	@ a1 * b1
	veor		T1, T1, XL
	__pmull_\pn	XL, XL_L, SHASH_L, s1l, s2l, s3l, s4l	@ a0 * b0
	__pmull_\pn	XM, T1_L, SHASH2_\pn			@ (a1+a0)(b1+b0)

4:	veor		T1, XL, XH
	veor		XM, XM, T1

	__pmull_reduce_\pn

	veor		T1, T1, XH
	veor		XL, XL, T1

	bne		0b

	vst1.64		{XL}, [r1]
	bx		lr
	.endm
	/*
	 * void pmull_ghash_update(int blocks, u64 dg[], const char *src,
	 *			   struct ghash_key const *k, const char *head)
	 */
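	//
	// The loads below consume SHASH, then HH, then HH3 and HH4 from
	// consecutive quadwords of *k, i.e. the ghash_key structure is
	// expected to hold the hash key and its higher powers back to back
	// in that order.
	//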
ENTRY(pmull_ghash_update_p64)
	vld1.64		{SHASH}, [r3]!
	vld1.64		{HH}, [r3]!
	vld1.64		{HH3-HH4}, [r3]

	veor		SHASH2_p64, SHASH_L, SHASH_H
	veor		SHASH2_H, HH_L, HH_H
	veor		HH34_L, HH3_L, HH3_H
	veor		HH34_H, HH4_L, HH4_H

	vmov.i8		MASK, #0xe1
	vshl.u64	MASK, MASK, #57

	ghash_update	p64
ENDPROC(pmull_ghash_update_p64)
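	//
	// The p8 variant cannot rely on a 64x64 carryless multiply, so the
	// entry point below precomputes the byte-rotated copies of the hash
	// key (s1l..s4l, s1h..s4h) that __pmull_p8 would otherwise have to
	// derive with vext.8 on every invocation, along with the k16/k32/k48
	// masking constants used to discard the wrapped-around bits.
	//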
ENTRY(pmull_ghash_update_p8)
	vld1.64		{SHASH}, [r3]
	veor		SHASH2_p8, SHASH_L, SHASH_H

	vext.8		s1l, SHASH_L, SHASH_L, #1
	vext.8		s2l, SHASH_L, SHASH_L, #2
	vext.8		s3l, SHASH_L, SHASH_L, #3
	vext.8		s4l, SHASH_L, SHASH_L, #4
	vext.8		s1h, SHASH_H, SHASH_H, #1
	vext.8		s2h, SHASH_H, SHASH_H, #2
	vext.8		s3h, SHASH_H, SHASH_H, #3
	vext.8		s4h, SHASH_H, SHASH_H, #4

	vmov.i64	k16, #0xffff
	vmov.i64	k32, #0xffffffff
	vmov.i64	k48, #0xffffffffffff

	ghash_update	p8
ENDPROC(pmull_ghash_update_p8)