ghash-ce-core.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Accelerated GHASH implementation with ARMv8 PMULL instructions.
 *
 * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
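
/*
 * GHASH processes the message in 128-bit blocks: for each block B the
 * running digest X is updated as X := (X ^ B) * H, where the multiplication
 * is a carry-less multiplication in GF(2^128) reduced modulo the polynomial
 * x^128 + x^7 + x^2 + x + 1, and H is the hash key (the block cipher applied
 * to an all-zeroes block). The routines below keep the running digest in XL.
 */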
SHASH .req v0
SHASH2 .req v1
T1 .req v2
T2 .req v3
MASK .req v4
XM .req v5
XL .req v6
XH .req v7
IN1 .req v7

k00_16 .req v8
k32_48 .req v9
t3 .req v10
t4 .req v11
t5 .req v12
t6 .req v13
t7 .req v14
t8 .req v15
t9 .req v16
perm1 .req v17
perm2 .req v18
perm3 .req v19
sh1 .req v20
sh2 .req v21
sh3 .req v22
sh4 .req v23
ss1 .req v24
ss2 .req v25
ss3 .req v26
ss4 .req v27

XL2 .req v8
XM2 .req v9
XH2 .req v10
XL3 .req v11
XM3 .req v12
XH3 .req v13
TT3 .req v14
TT4 .req v15
HH .req v16
HH3 .req v17
HH4 .req v18
HH34 .req v19
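
/*
 * Note that v8-v19 are aliased twice: the k00_16/k32_48, t3-t9 and
 * perm/sh/ss names above belong to the 8-bit polynomial multiply fallback
 * (the p8 code paths), while the XL2-HH34 names belong to the 4-way
 * aggregated PMULL path. The two register sets are never in use at the
 * same time.
 */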
.text
.arch armv8-a+crypto

.macro __pmull_p64, rd, rn, rm
pmull \rd\().1q, \rn\().1d, \rm\().1d
.endm

.macro __pmull2_p64, rd, rn, rm
pmull2 \rd\().1q, \rn\().2d, \rm\().2d
.endm
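
/*
 * The p64 helpers wrap the 64x64->128 bit carry-less multiply (PMULL/PMULL2
 * on the .1d/.2d arrangements) provided by the Crypto Extensions. The p8
 * variants below synthesize the same product from the 8x8->16 bit polynomial
 * multiply that is part of baseline AdvSIMD, for CPUs that lack the 64-bit
 * PMULL instruction.
 */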
.macro __pmull_p8, rq, ad, bd
ext t3.8b, \ad\().8b, \ad\().8b, #1 // A1
ext t5.8b, \ad\().8b, \ad\().8b, #2 // A2
ext t7.8b, \ad\().8b, \ad\().8b, #3 // A3

__pmull_p8_\bd \rq, \ad
.endm

.macro __pmull2_p8, rq, ad, bd
tbl t3.16b, {\ad\().16b}, perm1.16b // A1
tbl t5.16b, {\ad\().16b}, perm2.16b // A2
tbl t7.16b, {\ad\().16b}, perm3.16b // A3

__pmull2_p8_\bd \rq, \ad
.endm

.macro __pmull_p8_SHASH, rq, ad
__pmull_p8_tail \rq, \ad\().8b, SHASH.8b, 8b,, sh1, sh2, sh3, sh4
.endm

.macro __pmull_p8_SHASH2, rq, ad
__pmull_p8_tail \rq, \ad\().8b, SHASH2.8b, 8b,, ss1, ss2, ss3, ss4
.endm

.macro __pmull2_p8_SHASH, rq, ad
__pmull_p8_tail \rq, \ad\().16b, SHASH.16b, 16b, 2, sh1, sh2, sh3, sh4
.endm

.macro __pmull_p8_tail, rq, ad, bd, nb, t, b1, b2, b3, b4
pmull\t t3.8h, t3.\nb, \bd // F = A1*B
pmull\t t4.8h, \ad, \b1\().\nb // E = A*B1
pmull\t t5.8h, t5.\nb, \bd // H = A2*B
pmull\t t6.8h, \ad, \b2\().\nb // G = A*B2
pmull\t t7.8h, t7.\nb, \bd // J = A3*B
pmull\t t8.8h, \ad, \b3\().\nb // I = A*B3
pmull\t t9.8h, \ad, \b4\().\nb // K = A*B4
pmull\t \rq\().8h, \ad, \bd // D = A*B

eor t3.16b, t3.16b, t4.16b // L = E + F
eor t5.16b, t5.16b, t6.16b // M = G + H
eor t7.16b, t7.16b, t8.16b // N = I + J

uzp1 t4.2d, t3.2d, t5.2d
uzp2 t3.2d, t3.2d, t5.2d
uzp1 t6.2d, t7.2d, t9.2d
uzp2 t7.2d, t7.2d, t9.2d

// t3 = (L) (P0 + P1) << 8
// t5 = (M) (P2 + P3) << 16
eor t4.16b, t4.16b, t3.16b
and t3.16b, t3.16b, k32_48.16b

// t7 = (N) (P4 + P5) << 24
// t9 = (K) (P6 + P7) << 32
eor t6.16b, t6.16b, t7.16b
and t7.16b, t7.16b, k00_16.16b

eor t4.16b, t4.16b, t3.16b
eor t6.16b, t6.16b, t7.16b

zip2 t5.2d, t4.2d, t3.2d
zip1 t3.2d, t4.2d, t3.2d
zip2 t9.2d, t6.2d, t7.2d
zip1 t7.2d, t6.2d, t7.2d

ext t3.16b, t3.16b, t3.16b, #15
ext t5.16b, t5.16b, t5.16b, #14
ext t7.16b, t7.16b, t7.16b, #13
ext t9.16b, t9.16b, t9.16b, #12

eor t3.16b, t3.16b, t5.16b
eor t7.16b, t7.16b, t9.16b
eor \rq\().16b, \rq\().16b, t3.16b
eor \rq\().16b, \rq\().16b, t7.16b
.endm
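
/*
 * The tail macro above reconstructs a 64x64 bit carry-less product from
 * 8x8 bit multiplies: partial products of byte-rotated copies of the two
 * operands are summed, masked with k00_16/k32_48 to drop the bits that
 * spill into neighbouring lanes, shifted into position (the ext #15..#12)
 * and folded into the plain A*B term.
 */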
  118. .macro __pmull_pre_p64
  119. add x8, x3, #16
  120. ld1 {HH.2d-HH4.2d}, [x8]
  121. trn1 SHASH2.2d, SHASH.2d, HH.2d
  122. trn2 T1.2d, SHASH.2d, HH.2d
  123. eor SHASH2.16b, SHASH2.16b, T1.16b
  124. trn1 HH34.2d, HH3.2d, HH4.2d
  125. trn2 T1.2d, HH3.2d, HH4.2d
  126. eor HH34.16b, HH34.16b, T1.16b
  127. movi MASK.16b, #0xe1
  128. shl MASK.2d, MASK.2d, #57
  129. .endm
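
/*
 * The key structure at x3 is expected to hold H followed by H^2, H^3 and
 * H^4 (at offset 16); the higher powers are needed by the 4-way aggregated
 * path. SHASH2 and HH34 hold the xor of the high and low halves of each
 * power, precomputed for the middle Karatsuba term (a1 + a0)(b1 + b0).
 * MASK is the reduction constant 0xe1 << 57 used by __pmull_reduce_p64.
 */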
.macro __pmull_pre_p8
ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
eor SHASH2.16b, SHASH2.16b, SHASH.16b

// k00_16 := 0x0000000000000000_000000000000ffff
// k32_48 := 0x00000000ffffffff_0000ffffffffffff
movi k32_48.2d, #0xffffffff
mov k32_48.h[2], k32_48.h[0]
ushr k00_16.2d, k32_48.2d, #32

// prepare the permutation vectors
mov_q x5, 0x080f0e0d0c0b0a09
movi T1.8b, #8
dup perm1.2d, x5
eor perm1.16b, perm1.16b, T1.16b
ushr perm2.2d, perm1.2d, #8
ushr perm3.2d, perm1.2d, #16
ushr T1.2d, perm1.2d, #24
sli perm2.2d, perm1.2d, #56
sli perm3.2d, perm1.2d, #48
sli T1.2d, perm1.2d, #40

// precompute loop invariants
tbl sh1.16b, {SHASH.16b}, perm1.16b
tbl sh2.16b, {SHASH.16b}, perm2.16b
tbl sh3.16b, {SHASH.16b}, perm3.16b
tbl sh4.16b, {SHASH.16b}, T1.16b

ext ss1.8b, SHASH2.8b, SHASH2.8b, #1
ext ss2.8b, SHASH2.8b, SHASH2.8b, #2
ext ss3.8b, SHASH2.8b, SHASH2.8b, #3
ext ss4.8b, SHASH2.8b, SHASH2.8b, #4
.endm
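
/*
 * perm1..perm3 and T1 are tbl index vectors that rotate each 64-bit half
 * of a vector by 1, 2, 3 and 4 bytes respectively; applying them to SHASH
 * (and ext to SHASH2) yields the loop-invariant rotated copies of the hash
 * key (sh1..sh4, ss1..ss4) consumed by __pmull_p8_tail, so only the
 * per-block operand needs rotating inside the loop.
 */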
//
// PMULL (64x64->128) based reduction for CPUs that can do
// it in a single instruction.
//
.macro __pmull_reduce_p64
pmull T2.1q, XL.1d, MASK.1d

eor XM.16b, XM.16b, T1.16b

mov XH.d[0], XM.d[1]
mov XM.d[1], XL.d[0]

eor XL.16b, XM.16b, T2.16b
ext T2.16b, XL.16b, XL.16b, #8
pmull XL.1q, XL.1d, MASK.1d
.endm

//
// Alternative reduction for CPUs that lack support for the
// 64x64->128 PMULL instruction
//
.macro __pmull_reduce_p8
eor XM.16b, XM.16b, T1.16b

mov XL.d[1], XM.d[0]
mov XH.d[0], XM.d[1]

shl T1.2d, XL.2d, #57
shl T2.2d, XL.2d, #62
eor T2.16b, T2.16b, T1.16b
shl T1.2d, XL.2d, #63
eor T2.16b, T2.16b, T1.16b
ext T1.16b, XL.16b, XH.16b, #8
eor T2.16b, T2.16b, T1.16b

mov XL.d[1], T2.d[0]
mov XH.d[0], T2.d[1]

ushr T2.2d, XL.2d, #1
eor XH.16b, XH.16b, XL.16b
eor XL.16b, XL.16b, T2.16b
ushr T2.2d, T2.2d, #6
ushr XL.2d, XL.2d, #1
.endm
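
/*
 * Both reduction macros fold the 256-bit product held in XH:XM:XL back
 * into a 128-bit value modulo the GHASH field polynomial
 * x^128 + x^7 + x^2 + x + 1. The p64 variant multiplies by the precomputed
 * constant in MASK (0xe1 << 57); the p8 variant performs the same folding
 * with explicit shifts by 57/62/63 and 1/2/7.
 */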
.macro __pmull_ghash, pn
ld1 {SHASH.2d}, [x3]
ld1 {XL.2d}, [x1]

__pmull_pre_\pn

/* do the head block first, if supplied */
cbz x4, 0f
ld1 {T1.2d}, [x4]
mov x4, xzr
b 3f

0: .ifc \pn, p64
tbnz w0, #0, 2f // skip until #blocks is a
tbnz w0, #1, 2f // round multiple of 4

1: ld1 {XM3.16b-TT4.16b}, [x2], #64
sub w0, w0, #4

rev64 T1.16b, XM3.16b
rev64 T2.16b, XH3.16b
rev64 TT4.16b, TT4.16b
rev64 TT3.16b, TT3.16b

ext IN1.16b, TT4.16b, TT4.16b, #8
ext XL3.16b, TT3.16b, TT3.16b, #8

eor TT4.16b, TT4.16b, IN1.16b
pmull2 XH2.1q, SHASH.2d, IN1.2d // a1 * b1
pmull XL2.1q, SHASH.1d, IN1.1d // a0 * b0
pmull XM2.1q, SHASH2.1d, TT4.1d // (a1 + a0)(b1 + b0)

eor TT3.16b, TT3.16b, XL3.16b
pmull2 XH3.1q, HH.2d, XL3.2d // a1 * b1
pmull XL3.1q, HH.1d, XL3.1d // a0 * b0
pmull2 XM3.1q, SHASH2.2d, TT3.2d // (a1 + a0)(b1 + b0)

ext IN1.16b, T2.16b, T2.16b, #8
eor XL2.16b, XL2.16b, XL3.16b
eor XH2.16b, XH2.16b, XH3.16b
eor XM2.16b, XM2.16b, XM3.16b

eor T2.16b, T2.16b, IN1.16b
pmull2 XH3.1q, HH3.2d, IN1.2d // a1 * b1
pmull XL3.1q, HH3.1d, IN1.1d // a0 * b0
pmull XM3.1q, HH34.1d, T2.1d // (a1 + a0)(b1 + b0)

eor XL2.16b, XL2.16b, XL3.16b
eor XH2.16b, XH2.16b, XH3.16b
eor XM2.16b, XM2.16b, XM3.16b

ext IN1.16b, T1.16b, T1.16b, #8
ext TT3.16b, XL.16b, XL.16b, #8
eor XL.16b, XL.16b, IN1.16b
eor T1.16b, T1.16b, TT3.16b

pmull2 XH.1q, HH4.2d, XL.2d // a1 * b1
eor T1.16b, T1.16b, XL.16b
pmull XL.1q, HH4.1d, XL.1d // a0 * b0
pmull2 XM.1q, HH34.2d, T1.2d // (a1 + a0)(b1 + b0)

eor XL.16b, XL.16b, XL2.16b
eor XH.16b, XH.16b, XH2.16b
eor XM.16b, XM.16b, XM2.16b

eor T2.16b, XL.16b, XH.16b
ext T1.16b, XL.16b, XH.16b, #8
eor XM.16b, XM.16b, T2.16b

__pmull_reduce_p64

eor T2.16b, T2.16b, XH.16b
eor XL.16b, XL.16b, T2.16b

cbz w0, 5f
b 1b
.endif

2: ld1 {T1.2d}, [x2], #16
sub w0, w0, #1

3: /* multiply XL by SHASH in GF(2^128) */
CPU_LE( rev64 T1.16b, T1.16b )

ext T2.16b, XL.16b, XL.16b, #8
ext IN1.16b, T1.16b, T1.16b, #8
eor T1.16b, T1.16b, T2.16b
eor XL.16b, XL.16b, IN1.16b

__pmull2_\pn XH, XL, SHASH // a1 * b1
eor T1.16b, T1.16b, XL.16b
__pmull_\pn XL, XL, SHASH // a0 * b0
__pmull_\pn XM, T1, SHASH2 // (a1 + a0)(b1 + b0)

4: eor T2.16b, XL.16b, XH.16b
ext T1.16b, XL.16b, XH.16b, #8
eor XM.16b, XM.16b, T2.16b

__pmull_reduce_\pn

eor T2.16b, T2.16b, XH.16b
eor XL.16b, XL.16b, T2.16b

cbnz w0, 0b

5: st1 {XL.2d}, [x1]
ret
.endm

/*
 * void pmull_ghash_update(int blocks, u64 dg[], const char *src,
 *                         struct ghash_key const *k, const char *head)
 */
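/*
 * Per the AAPCS64 calling convention the arguments arrive as x0 = blocks,
 * x1 = dg, x2 = src, x3 = k, x4 = head, which matches the register usage
 * of __pmull_ghash above.
 */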
SYM_FUNC_START(pmull_ghash_update_p64)
__pmull_ghash p64
SYM_FUNC_END(pmull_ghash_update_p64)

SYM_FUNC_START(pmull_ghash_update_p8)
__pmull_ghash p8
SYM_FUNC_END(pmull_ghash_update_p8)

KS0 .req v8
KS1 .req v9
KS2 .req v10
KS3 .req v11

INP0 .req v21
INP1 .req v22
INP2 .req v23
INP3 .req v24

K0 .req v25
K1 .req v26
K2 .req v27
K3 .req v28
K4 .req v12
K5 .req v13
K6 .req v4
K7 .req v5
K8 .req v14
K9 .req v15
KK .req v29
KL .req v30
KM .req v31

.macro load_round_keys, rounds, rk, tmp
add \tmp, \rk, #64
ld1 {K0.4s-K3.4s}, [\rk]
ld1 {K4.4s-K5.4s}, [\tmp]
add \tmp, \rk, \rounds, lsl #4
sub \tmp, \tmp, #32
ld1 {KK.4s-KM.4s}, [\tmp]
.endm
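
/*
 * load_round_keys assumes 16 bytes per round key: K0-K5 receive the first
 * six round keys, and KK/KL/KM the last three (rounds - 2, rounds - 1 and
 * the final round key), independent of the key size.
 */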
.macro enc_round, state, key
aese \state\().16b, \key\().16b
aesmc \state\().16b, \state\().16b
.endm

.macro enc_qround, s0, s1, s2, s3, key
enc_round \s0, \key
enc_round \s1, \key
enc_round \s2, \key
enc_round \s3, \key
.endm

.macro enc_block, state, rounds, rk, tmp
add \tmp, \rk, #96
ld1 {K6.4s-K7.4s}, [\tmp], #32
.irp key, K0, K1, K2, K3, K4, K5
enc_round \state, \key
.endr

tbnz \rounds, #2, .Lnot128_\@

.Lout256_\@:
enc_round \state, K6
enc_round \state, K7

.Lout192_\@:
enc_round \state, KK

aese \state\().16b, KL.16b
eor \state\().16b, \state\().16b, KM.16b

.subsection 1

.Lnot128_\@:
ld1 {K8.4s-K9.4s}, [\tmp], #32
enc_round \state, K6
enc_round \state, K7
ld1 {K6.4s-K7.4s}, [\tmp]
enc_round \state, K8
enc_round \state, K9
tbz \rounds, #1, .Lout192_\@
b .Lout256_\@

.previous
.endm
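
/*
 * enc_block runs the AES rounds for a single block: nine aese/aesmc rounds
 * for AES-128, branching out of line via .Lnot128 to consume the extra
 * round keys for AES-192/-256 (rounds is 10, 12 or 14, so bits 1 and 2
 * distinguish the key sizes), and finishing with a bare aese plus an xor
 * of the last round key.
 */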
.align 6
.macro pmull_gcm_do_crypt, enc
stp x29, x30, [sp, #-32]!
mov x29, sp
str x19, [sp, #24]

load_round_keys x7, x6, x8

ld1 {SHASH.2d}, [x3], #16
ld1 {HH.2d-HH4.2d}, [x3]

trn1 SHASH2.2d, SHASH.2d, HH.2d
trn2 T1.2d, SHASH.2d, HH.2d
eor SHASH2.16b, SHASH2.16b, T1.16b

trn1 HH34.2d, HH3.2d, HH4.2d
trn2 T1.2d, HH3.2d, HH4.2d
eor HH34.16b, HH34.16b, T1.16b

ld1 {XL.2d}, [x4]

cbz x0, 3f // tag only?

ldr w8, [x5, #12] // load lower counter
CPU_LE( rev w8, w8 )

0: mov w9, #4 // max blocks per round
add x10, x0, #0xf
lsr x10, x10, #4 // remaining blocks

subs x0, x0, #64
csel w9, w10, w9, mi
add w8, w8, w9

bmi 1f
ld1 {INP0.16b-INP3.16b}, [x2], #64
.subsection 1

/*
 * Populate the four input registers right to left with up to 63 bytes
 * of data, using overlapping loads to avoid branches.
 *
 *                INP0     INP1     INP2     INP3
 *  1 byte     |         |        |        |x       |
 * 16 bytes    |         |        |        |xxxxxxxx|
 * 17 bytes    |         |        |xxxxxxxx|x       |
 * 47 bytes    |         |xxxxxxxx|xxxxxxxx|xxxxxxx |
 * etc etc
 *
 * Note that this code may read up to 15 bytes before the start of
 * the input. It is up to the calling code to ensure this is safe if
 * this happens in the first iteration of the loop (i.e., when the
 * input size is < 16 bytes)
 */
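/*
 * The tail handling below loads a tbl index vector T1 from .Lpermute_table
 * at an offset chosen so that the trailing data bytes end up at the start
 * of INP3 and the remaining bytes are zero-filled, letting the main path
 * treat the partial block like a full one.
 */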
1: mov x15, #16
ands x19, x0, #0xf
csel x19, x19, x15, ne
adr_l x17, .Lpermute_table + 16

sub x11, x15, x19
add x12, x17, x11
sub x17, x17, x11
ld1 {T1.16b}, [x12]
sub x10, x1, x11
sub x11, x2, x11

cmp x0, #-16
csel x14, x15, xzr, gt
cmp x0, #-32
csel x15, x15, xzr, gt
cmp x0, #-48
csel x16, x19, xzr, gt
csel x1, x1, x10, gt
csel x2, x2, x11, gt

ld1 {INP0.16b}, [x2], x14
ld1 {INP1.16b}, [x2], x15
ld1 {INP2.16b}, [x2], x16
ld1 {INP3.16b}, [x2]
tbl INP3.16b, {INP3.16b}, T1.16b
b 2f
.previous

2: .if \enc == 0
bl pmull_gcm_ghash_4x
.endif

bl pmull_gcm_enc_4x

tbnz x0, #63, 6f
st1 {INP0.16b-INP3.16b}, [x1], #64
.if \enc == 1
bl pmull_gcm_ghash_4x
.endif
bne 0b

3: ldp x19, x10, [sp, #24]
cbz x10, 5f // output tag?

ld1 {INP3.16b}, [x10] // load lengths[]
mov w9, #1
bl pmull_gcm_ghash_4x

mov w11, #(0x1 << 24) // BE '1U'
ld1 {KS0.16b}, [x5]
mov KS0.s[3], w11

enc_block KS0, x7, x6, x12

ext XL.16b, XL.16b, XL.16b, #8
rev64 XL.16b, XL.16b
eor XL.16b, XL.16b, KS0.16b

.if \enc == 1
st1 {XL.16b}, [x10] // store tag
.else
ldp x11, x12, [sp, #40] // load tag pointer and authsize
adr_l x17, .Lpermute_table
ld1 {KS0.16b}, [x11] // load supplied tag
add x17, x17, x12
ld1 {KS1.16b}, [x17] // load permute vector

cmeq XL.16b, XL.16b, KS0.16b // compare tags
mvn XL.16b, XL.16b // -1 for fail, 0 for pass

tbl XL.16b, {XL.16b}, KS1.16b // keep authsize bytes only

sminv b0, XL.16b // signed minimum across XL
smov w0, v0.b[0] // return b0
.endif

4: ldp x29, x30, [sp], #32
ret

5:
CPU_LE( rev w8, w8 )
str w8, [x5, #12] // store lower counter
st1 {XL.2d}, [x4]
b 4b

6: ld1 {T1.16b-T2.16b}, [x17], #32 // permute vectors
sub x17, x17, x19, lsl #1
cmp w9, #1
beq 7f
.subsection 1

7: ld1 {INP2.16b}, [x1]
tbx INP2.16b, {INP3.16b}, T1.16b
mov INP3.16b, INP2.16b
b 8f
.previous

st1 {INP0.16b}, [x1], x14
st1 {INP1.16b}, [x1], x15
st1 {INP2.16b}, [x1], x16
tbl INP3.16b, {INP3.16b}, T1.16b
tbx INP3.16b, {INP2.16b}, T2.16b

8: st1 {INP3.16b}, [x1]

.if \enc == 1
ld1 {T1.16b}, [x17]
tbl INP3.16b, {INP3.16b}, T1.16b // clear non-data bits
bl pmull_gcm_ghash_4x
.endif
b 3b
.endm

/*
 * void pmull_gcm_encrypt(int blocks, u8 dst[], const u8 src[],
 *                        struct ghash_key const *k, u64 dg[], u8 ctr[],
 *                        int rounds, u8 tag)
 */
SYM_FUNC_START(pmull_gcm_encrypt)
pmull_gcm_do_crypt 1
SYM_FUNC_END(pmull_gcm_encrypt)

/*
 * void pmull_gcm_decrypt(int blocks, u8 dst[], const u8 src[],
 *                        struct ghash_key const *k, u64 dg[], u8 ctr[],
 *                        int rounds, u8 tag)
 */
SYM_FUNC_START(pmull_gcm_decrypt)
pmull_gcm_do_crypt 0
SYM_FUNC_END(pmull_gcm_decrypt)
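
/*
 * Encrypt and decrypt share the pmull_gcm_do_crypt body; the enc parameter
 * only decides whether GHASH is computed over the ciphertext before the
 * CTR xor (decrypt) or after it (encrypt). On the decrypt path the routine
 * also compares the computed tag against the supplied one and returns the
 * result in w0 (zero on success, nonzero on failure).
 */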
SYM_FUNC_START_LOCAL(pmull_gcm_ghash_4x)
movi MASK.16b, #0xe1
shl MASK.2d, MASK.2d, #57

rev64 T1.16b, INP0.16b
rev64 T2.16b, INP1.16b
rev64 TT3.16b, INP2.16b
rev64 TT4.16b, INP3.16b

ext XL.16b, XL.16b, XL.16b, #8

tbz w9, #2, 0f // <4 blocks?
.subsection 1

0: movi XH2.16b, #0
movi XM2.16b, #0
movi XL2.16b, #0

tbz w9, #0, 1f // 2 blocks?
tbz w9, #1, 2f // 1 block?

eor T2.16b, T2.16b, XL.16b
ext T1.16b, T2.16b, T2.16b, #8
b .Lgh3

1: eor TT3.16b, TT3.16b, XL.16b
ext T2.16b, TT3.16b, TT3.16b, #8
b .Lgh2

2: eor TT4.16b, TT4.16b, XL.16b
ext IN1.16b, TT4.16b, TT4.16b, #8
b .Lgh1
.previous

eor T1.16b, T1.16b, XL.16b
ext IN1.16b, T1.16b, T1.16b, #8

pmull2 XH2.1q, HH4.2d, IN1.2d // a1 * b1
eor T1.16b, T1.16b, IN1.16b
pmull XL2.1q, HH4.1d, IN1.1d // a0 * b0
pmull2 XM2.1q, HH34.2d, T1.2d // (a1 + a0)(b1 + b0)

ext T1.16b, T2.16b, T2.16b, #8
.Lgh3: eor T2.16b, T2.16b, T1.16b
pmull2 XH.1q, HH3.2d, T1.2d // a1 * b1
pmull XL.1q, HH3.1d, T1.1d // a0 * b0
pmull XM.1q, HH34.1d, T2.1d // (a1 + a0)(b1 + b0)

eor XH2.16b, XH2.16b, XH.16b
eor XL2.16b, XL2.16b, XL.16b
eor XM2.16b, XM2.16b, XM.16b

ext T2.16b, TT3.16b, TT3.16b, #8
.Lgh2: eor TT3.16b, TT3.16b, T2.16b
pmull2 XH.1q, HH.2d, T2.2d // a1 * b1
pmull XL.1q, HH.1d, T2.1d // a0 * b0
pmull2 XM.1q, SHASH2.2d, TT3.2d // (a1 + a0)(b1 + b0)

eor XH2.16b, XH2.16b, XH.16b
eor XL2.16b, XL2.16b, XL.16b
eor XM2.16b, XM2.16b, XM.16b

ext IN1.16b, TT4.16b, TT4.16b, #8
.Lgh1: eor TT4.16b, TT4.16b, IN1.16b
pmull XL.1q, SHASH.1d, IN1.1d // a0 * b0
pmull2 XH.1q, SHASH.2d, IN1.2d // a1 * b1
pmull XM.1q, SHASH2.1d, TT4.1d // (a1 + a0)(b1 + b0)

eor XH.16b, XH.16b, XH2.16b
eor XL.16b, XL.16b, XL2.16b
eor XM.16b, XM.16b, XM2.16b

eor T2.16b, XL.16b, XH.16b
ext T1.16b, XL.16b, XH.16b, #8
eor XM.16b, XM.16b, T2.16b

__pmull_reduce_p64

eor T2.16b, T2.16b, XH.16b
eor XL.16b, XL.16b, T2.16b

ret
SYM_FUNC_END(pmull_gcm_ghash_4x)
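
/*
 * pmull_gcm_ghash_4x folds up to four blocks (INP0..INP3, count in w9)
 * into the digest at once, multiplying them by H^4..H^1 respectively so
 * that only a single reduction is needed per call (aggregated reduction).
 */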
SYM_FUNC_START_LOCAL(pmull_gcm_enc_4x)
ld1 {KS0.16b}, [x5] // load upper counter
sub w10, w8, #4
sub w11, w8, #3
sub w12, w8, #2
sub w13, w8, #1
rev w10, w10
rev w11, w11
rev w12, w12
rev w13, w13
mov KS1.16b, KS0.16b
mov KS2.16b, KS0.16b
mov KS3.16b, KS0.16b
ins KS0.s[3], w10 // set lower counter
ins KS1.s[3], w11
ins KS2.s[3], w12
ins KS3.s[3], w13

add x10, x6, #96 // round key pointer
ld1 {K6.4s-K7.4s}, [x10], #32
.irp key, K0, K1, K2, K3, K4, K5
enc_qround KS0, KS1, KS2, KS3, \key
.endr

tbnz x7, #2, .Lnot128
.subsection 1

.Lnot128:
ld1 {K8.4s-K9.4s}, [x10], #32
.irp key, K6, K7
enc_qround KS0, KS1, KS2, KS3, \key
.endr
ld1 {K6.4s-K7.4s}, [x10]
.irp key, K8, K9
enc_qround KS0, KS1, KS2, KS3, \key
.endr
tbz x7, #1, .Lout192
b .Lout256
.previous

.Lout256:
.irp key, K6, K7
enc_qround KS0, KS1, KS2, KS3, \key
.endr

.Lout192:
enc_qround KS0, KS1, KS2, KS3, KK

aese KS0.16b, KL.16b
aese KS1.16b, KL.16b
aese KS2.16b, KL.16b
aese KS3.16b, KL.16b

eor KS0.16b, KS0.16b, KM.16b
eor KS1.16b, KS1.16b, KM.16b
eor KS2.16b, KS2.16b, KM.16b
eor KS3.16b, KS3.16b, KM.16b

eor INP0.16b, INP0.16b, KS0.16b
eor INP1.16b, INP1.16b, KS1.16b
eor INP2.16b, INP2.16b, KS2.16b
eor INP3.16b, INP3.16b, KS3.16b

ret
SYM_FUNC_END(pmull_gcm_enc_4x)
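
/*
 * pmull_gcm_enc_4x derives four consecutive counter blocks from the IV at
 * [x5] and the (already incremented) 32-bit counter in w8, encrypts them
 * with the round keys preloaded by load_round_keys, and xors the resulting
 * keystream into INP0..INP3 in place.
 */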
.section ".rodata", "a"
.align 6
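/*
 * tbl/tbx index table used for partial-block handling: 16 bytes of 0xff
 * (which tbl translates to zero), an identity permutation, and the same
 * pattern repeated. Indexing into it at different offsets yields masks
 * that shift data bytes into place and clear the rest.
 */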
.Lpermute_table:
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
.byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
.byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf
.previous