/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm64/crypto/aes-modes.S - chaining mode wrappers for AES
 *
 * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

/* included by aes-ce.S and aes-neon.S */

	.text
	.align		4

#ifndef MAX_STRIDE
#define MAX_STRIDE	4
#endif

#if MAX_STRIDE == 4
#define ST4(x...) x
#define ST5(x...)
#else
#define ST4(x...)
#define ST5(x...) x
#endif
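
/*
 * MAX_STRIDE is the number of blocks handled per iteration of the Nx
 * loops below: 4 by default, 5 when the including file overrides it.
 * The ST4()/ST5() wrappers emit their argument only for the matching
 * stride, so both variants share one body.
 */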

SYM_FUNC_START_LOCAL(aes_encrypt_block4x)
	encrypt_block4x	v0, v1, v2, v3, w3, x2, x8, w7
	ret
SYM_FUNC_END(aes_encrypt_block4x)

SYM_FUNC_START_LOCAL(aes_decrypt_block4x)
	decrypt_block4x	v0, v1, v2, v3, w3, x2, x8, w7
	ret
SYM_FUNC_END(aes_decrypt_block4x)

#if MAX_STRIDE == 5
SYM_FUNC_START_LOCAL(aes_encrypt_block5x)
	encrypt_block5x	v0, v1, v2, v3, v4, w3, x2, x8, w7
	ret
SYM_FUNC_END(aes_encrypt_block5x)

SYM_FUNC_START_LOCAL(aes_decrypt_block5x)
	decrypt_block5x	v0, v1, v2, v3, v4, w3, x2, x8, w7
	ret
SYM_FUNC_END(aes_decrypt_block5x)
#endif
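
/*
 * The blockNx helpers are reached via bl from the mode routines below;
 * those routines save x29/x30 on entry, so the helpers only need a
 * plain ret.
 */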

	/*
	 * aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
	 *		   int blocks)
	 * aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
	 *		   int blocks)
	 */
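	/*
	 * Per AAPCS64, the arguments arrive as x0=out, x1=in, x2=rk,
	 * w3=rounds and w4=blocks, which is how the registers are used
	 * throughout the routines below.
	 */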
AES_FUNC_START(aes_ecb_encrypt)
	stp		x29, x30, [sp, #-16]!
	mov		x29, sp

	enc_prepare	w3, x2, x5

.LecbencloopNx:
	subs		w4, w4, #MAX_STRIDE
	bmi		.Lecbenc1x
	ld1		{v0.16b-v3.16b}, [x1], #64	/* get 4 pt blocks */
ST4(	bl		aes_encrypt_block4x		)
ST5(	ld1		{v4.16b}, [x1], #16		)
ST5(	bl		aes_encrypt_block5x		)
	st1		{v0.16b-v3.16b}, [x0], #64
ST5(	st1		{v4.16b}, [x0], #16		)
	b		.LecbencloopNx
.Lecbenc1x:
	adds		w4, w4, #MAX_STRIDE
	beq		.Lecbencout
.Lecbencloop:
	ld1		{v0.16b}, [x1], #16		/* get next pt block */
	encrypt_block	v0, w3, x2, x5, w6
	st1		{v0.16b}, [x0], #16
	subs		w4, w4, #1
	bne		.Lecbencloop
.Lecbencout:
	ldp		x29, x30, [sp], #16
	ret
AES_FUNC_END(aes_ecb_encrypt)

AES_FUNC_START(aes_ecb_decrypt)
	stp		x29, x30, [sp, #-16]!
	mov		x29, sp

	dec_prepare	w3, x2, x5

.LecbdecloopNx:
	subs		w4, w4, #MAX_STRIDE
	bmi		.Lecbdec1x
	ld1		{v0.16b-v3.16b}, [x1], #64	/* get 4 ct blocks */
ST4(	bl		aes_decrypt_block4x		)
ST5(	ld1		{v4.16b}, [x1], #16		)
ST5(	bl		aes_decrypt_block5x		)
	st1		{v0.16b-v3.16b}, [x0], #64
ST5(	st1		{v4.16b}, [x0], #16		)
	b		.LecbdecloopNx
.Lecbdec1x:
	adds		w4, w4, #MAX_STRIDE
	beq		.Lecbdecout
.Lecbdecloop:
	ld1		{v0.16b}, [x1], #16		/* get next ct block */
	decrypt_block	v0, w3, x2, x5, w6
	st1		{v0.16b}, [x0], #16
	subs		w4, w4, #1
	bne		.Lecbdecloop
.Lecbdecout:
	ldp		x29, x30, [sp], #16
	ret
AES_FUNC_END(aes_ecb_decrypt)

	/*
	 * aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
	 *		   int blocks, u8 iv[])
	 * aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
	 *		   int blocks, u8 iv[])
	 * aes_essiv_cbc_encrypt(u8 out[], u8 const in[], u32 const rk1[],
	 *			 int rounds, int blocks, u8 iv[],
	 *			 u32 const rk2[]);
	 * aes_essiv_cbc_decrypt(u8 out[], u8 const in[], u32 const rk1[],
	 *			 int rounds, int blocks, u8 iv[],
	 *			 u32 const rk2[]);
	 */
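	/*
	 * ESSIV differs from plain CBC only in how the IV is derived: the
	 * caller-supplied iv[] is first encrypted with the separate key
	 * schedule rk2[] (always expanded for AES-256, hence the fixed 14
	 * rounds below), and the result becomes the CBC IV.
	 */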
AES_FUNC_START(aes_essiv_cbc_encrypt)
	ld1		{v4.16b}, [x5]			/* get iv */

	mov		w8, #14				/* AES-256: 14 rounds */
	enc_prepare	w8, x6, x7
	encrypt_block	v4, w8, x6, x7, w9
	enc_switch_key	w3, x2, x6
	b		.Lcbcencloop4x

AES_FUNC_START(aes_cbc_encrypt)
	ld1		{v4.16b}, [x5]			/* get iv */
	enc_prepare	w3, x2, x6

.Lcbcencloop4x:
	subs		w4, w4, #4
	bmi		.Lcbcenc1x
	ld1		{v0.16b-v3.16b}, [x1], #64	/* get 4 pt blocks */
	eor		v0.16b, v0.16b, v4.16b		/* ..and xor with iv */
	encrypt_block	v0, w3, x2, x6, w7
	eor		v1.16b, v1.16b, v0.16b
	encrypt_block	v1, w3, x2, x6, w7
	eor		v2.16b, v2.16b, v1.16b
	encrypt_block	v2, w3, x2, x6, w7
	eor		v3.16b, v3.16b, v2.16b
	encrypt_block	v3, w3, x2, x6, w7
	st1		{v0.16b-v3.16b}, [x0], #64
	mov		v4.16b, v3.16b
	b		.Lcbcencloop4x
.Lcbcenc1x:
	adds		w4, w4, #4
	beq		.Lcbcencout
.Lcbcencloop:
	ld1		{v0.16b}, [x1], #16		/* get next pt block */
	eor		v4.16b, v4.16b, v0.16b		/* ..and xor with iv */
	encrypt_block	v4, w3, x2, x6, w7
	st1		{v4.16b}, [x0], #16
	subs		w4, w4, #1
	bne		.Lcbcencloop
.Lcbcencout:
	st1		{v4.16b}, [x5]			/* return iv */
	ret
AES_FUNC_END(aes_cbc_encrypt)
AES_FUNC_END(aes_essiv_cbc_encrypt)

AES_FUNC_START(aes_essiv_cbc_decrypt)
	stp		x29, x30, [sp, #-16]!
	mov		x29, sp

	ld1		{cbciv.16b}, [x5]		/* get iv */

	mov		w8, #14				/* AES-256: 14 rounds */
	enc_prepare	w8, x6, x7
	encrypt_block	cbciv, w8, x6, x7, w9
	b		.Lessivcbcdecstart

AES_FUNC_START(aes_cbc_decrypt)
	stp		x29, x30, [sp, #-16]!
	mov		x29, sp

	ld1		{cbciv.16b}, [x5]		/* get iv */
.Lessivcbcdecstart:
	dec_prepare	w3, x2, x6

.LcbcdecloopNx:
	subs		w4, w4, #MAX_STRIDE
	bmi		.Lcbcdec1x
	ld1		{v0.16b-v3.16b}, [x1], #64	/* get 4 ct blocks */
#if MAX_STRIDE == 5
	ld1		{v4.16b}, [x1], #16		/* get 1 ct block */
	mov		v5.16b, v0.16b
	mov		v6.16b, v1.16b
	mov		v7.16b, v2.16b
	bl		aes_decrypt_block5x
	sub		x1, x1, #32
	eor		v0.16b, v0.16b, cbciv.16b
	eor		v1.16b, v1.16b, v5.16b
	ld1		{v5.16b}, [x1], #16		/* reload 1 ct block */
	ld1		{cbciv.16b}, [x1], #16		/* reload 1 ct block */
	eor		v2.16b, v2.16b, v6.16b
	eor		v3.16b, v3.16b, v7.16b
	eor		v4.16b, v4.16b, v5.16b
#else
	mov		v4.16b, v0.16b
	mov		v5.16b, v1.16b
	mov		v6.16b, v2.16b
	bl		aes_decrypt_block4x
	sub		x1, x1, #16
	eor		v0.16b, v0.16b, cbciv.16b
	eor		v1.16b, v1.16b, v4.16b
	ld1		{cbciv.16b}, [x1], #16		/* reload 1 ct block */
	eor		v2.16b, v2.16b, v5.16b
	eor		v3.16b, v3.16b, v6.16b
#endif
	st1		{v0.16b-v3.16b}, [x0], #64
ST5(	st1		{v4.16b}, [x0], #16		)
	b		.LcbcdecloopNx
.Lcbcdec1x:
	adds		w4, w4, #MAX_STRIDE
	beq		.Lcbcdecout
.Lcbcdecloop:
	ld1		{v1.16b}, [x1], #16		/* get next ct block */
	mov		v0.16b, v1.16b			/* ...and copy to v0 */
	decrypt_block	v0, w3, x2, x6, w7
	eor		v0.16b, v0.16b, cbciv.16b	/* xor with iv => pt */
	mov		cbciv.16b, v1.16b		/* ct is next iv */
	st1		{v0.16b}, [x0], #16
	subs		w4, w4, #1
	bne		.Lcbcdecloop
.Lcbcdecout:
	st1		{cbciv.16b}, [x5]		/* return iv */
	ldp		x29, x30, [sp], #16
	ret
AES_FUNC_END(aes_cbc_decrypt)
AES_FUNC_END(aes_essiv_cbc_decrypt)

	/*
	 * aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
	 *		       int rounds, int bytes, u8 const iv[])
	 * aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
	 *		       int rounds, int bytes, u8 const iv[])
	 */
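	/*
	 * Ciphertext stealing: 'bytes' covers the final two blocks of the
	 * message, of which the last may be partial. The permute vectors
	 * loaded from .Lcts_permute_table let tbl/tbx shift the partial
	 * block into place, and the overlapping loads and stores keep all
	 * accesses within the supplied length.
	 */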
AES_FUNC_START(aes_cbc_cts_encrypt)
	adr_l		x8, .Lcts_permute_table
	sub		x4, x4, #16
	add		x9, x8, #32
	add		x8, x8, x4
	sub		x9, x9, x4
	ld1		{v3.16b}, [x8]
	ld1		{v4.16b}, [x9]

	ld1		{v0.16b}, [x1], x4		/* overlapping loads */
	ld1		{v1.16b}, [x1]

	ld1		{v5.16b}, [x5]			/* get iv */
	enc_prepare	w3, x2, x6

	eor		v0.16b, v0.16b, v5.16b		/* xor with iv */
	tbl		v1.16b, {v1.16b}, v4.16b
	encrypt_block	v0, w3, x2, x6, w7

	eor		v1.16b, v1.16b, v0.16b
	tbl		v0.16b, {v0.16b}, v3.16b
	encrypt_block	v1, w3, x2, x6, w7

	add		x4, x0, x4
	st1		{v0.16b}, [x4]			/* overlapping stores */
	st1		{v1.16b}, [x0]
	ret
AES_FUNC_END(aes_cbc_cts_encrypt)

AES_FUNC_START(aes_cbc_cts_decrypt)
	adr_l		x8, .Lcts_permute_table
	sub		x4, x4, #16
	add		x9, x8, #32
	add		x8, x8, x4
	sub		x9, x9, x4
	ld1		{v3.16b}, [x8]
	ld1		{v4.16b}, [x9]

	ld1		{v0.16b}, [x1], x4		/* overlapping loads */
	ld1		{v1.16b}, [x1]

	ld1		{v5.16b}, [x5]			/* get iv */
	dec_prepare	w3, x2, x6

	decrypt_block	v0, w3, x2, x6, w7
	tbl		v2.16b, {v0.16b}, v3.16b
	eor		v2.16b, v2.16b, v1.16b

	tbx		v0.16b, {v1.16b}, v4.16b
	decrypt_block	v0, w3, x2, x6, w7
	eor		v0.16b, v0.16b, v5.16b		/* xor with iv */

	add		x4, x0, x4
	st1		{v2.16b}, [x4]			/* overlapping stores */
	st1		{v0.16b}, [x0]
	ret
AES_FUNC_END(aes_cbc_cts_decrypt)

	.section	".rodata", "a"
	.align		6
.Lcts_permute_table:
	.byte		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	.byte		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	.byte		 0x0,  0x1,  0x2,  0x3,  0x4,  0x5,  0x6,  0x7
	.byte		 0x8,  0x9,  0xa,  0xb,  0xc,  0xd,  0xe,  0xf
	.byte		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	.byte		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	.previous

	/*
	 * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
	 *		   int blocks, u8 ctr[])
	 */
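	/*
	 * The 128-bit counter is kept big-endian in vctr; its low 64 bits
	 * are also held byte-swapped in x6 so they can be bumped with a
	 * plain add. The Nx fast path is only entered when the 32-bit low
	 * word cannot wrap during this call (the cmn check below); the
	 * single block loop handles carries into the upper half explicitly
	 * at .Lctrcarry.
	 */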
AES_FUNC_START(aes_ctr_encrypt)
	stp		x29, x30, [sp, #-16]!
	mov		x29, sp

	enc_prepare	w3, x2, x6
	ld1		{vctr.16b}, [x5]

	umov		x6, vctr.d[1]		/* keep swabbed ctr in reg */
	rev		x6, x6
	cmn		w6, w4			/* 32 bit overflow? */
	bcs		.Lctrloop
.LctrloopNx:
	subs		w4, w4, #MAX_STRIDE
	bmi		.Lctr1x
	add		w7, w6, #1
	mov		v0.16b, vctr.16b
	add		w8, w6, #2
	mov		v1.16b, vctr.16b
	add		w9, w6, #3
	mov		v2.16b, vctr.16b
	add		w9, w6, #3
	rev		w7, w7
	mov		v3.16b, vctr.16b
	rev		w8, w8
ST5(	mov		v4.16b, vctr.16b	)
	mov		v1.s[3], w7
	rev		w9, w9
ST5(	add		w10, w6, #4		)
	mov		v2.s[3], w8
ST5(	rev		w10, w10		)
	mov		v3.s[3], w9
ST5(	mov		v4.s[3], w10		)
	ld1		{v5.16b-v7.16b}, [x1], #48	/* get 3 input blocks */
ST4(	bl		aes_encrypt_block4x		)
ST5(	bl		aes_encrypt_block5x		)
	eor		v0.16b, v5.16b, v0.16b
ST4(	ld1		{v5.16b}, [x1], #16		)
	eor		v1.16b, v6.16b, v1.16b
ST5(	ld1		{v5.16b-v6.16b}, [x1], #32	)
	eor		v2.16b, v7.16b, v2.16b
	eor		v3.16b, v5.16b, v3.16b
ST5(	eor		v4.16b, v6.16b, v4.16b		)
	st1		{v0.16b-v3.16b}, [x0], #64
ST5(	st1		{v4.16b}, [x0], #16		)
	add		x6, x6, #MAX_STRIDE
	rev		x7, x6
	ins		vctr.d[1], x7
	cbz		w4, .Lctrout
	b		.LctrloopNx
.Lctr1x:
	adds		w4, w4, #MAX_STRIDE
	beq		.Lctrout
.Lctrloop:
	mov		v0.16b, vctr.16b
	encrypt_block	v0, w3, x2, x8, w7

	adds		x6, x6, #1		/* increment BE ctr */
	rev		x7, x6
	ins		vctr.d[1], x7
	bcs		.Lctrcarry		/* overflow? */

.Lctrcarrydone:
	subs		w4, w4, #1
	bmi		.Lctrtailblock		/* blocks <0 means tail block */
	ld1		{v3.16b}, [x1], #16
	eor		v3.16b, v0.16b, v3.16b
	st1		{v3.16b}, [x0], #16
	bne		.Lctrloop

.Lctrout:
	st1		{vctr.16b}, [x5]	/* return next CTR value */
	ldp		x29, x30, [sp], #16
	ret

.Lctrtailblock:
	st1		{v0.16b}, [x0]
	b		.Lctrout

.Lctrcarry:
	umov		x7, vctr.d[0]		/* load upper word of ctr */
	rev		x7, x7			/* ... to handle the carry */
	add		x7, x7, #1
	rev		x7, x7
	ins		vctr.d[0], x7
	b		.Lctrcarrydone
AES_FUNC_END(aes_ctr_encrypt)

	/*
	 * aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
	 *		   int bytes, u8 const rk2[], u8 iv[], int first)
	 * aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
	 *		   int bytes, u8 const rk2[], u8 iv[], int first)
	 */
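	/*
	 * 'bytes' need not be a multiple of the block size; a trailing
	 * partial block is handled with ciphertext stealing. When 'first'
	 * is nonzero, the initial tweak is derived by encrypting iv[] with
	 * the rk2[] key schedule before switching to rk1[] for the data.
	 */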
	.macro		next_tweak, out, in, tmp
	sshr		\tmp\().2d,  \in\().2d,   #63
	and		\tmp\().16b, \tmp\().16b, xtsmask.16b
	add		\out\().2d,  \in\().2d,   \in\().2d
	ext		\tmp\().16b, \tmp\().16b, \tmp\().16b, #8
	eor		\out\().16b, \out\().16b, \tmp\().16b
	.endm
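
	/*
	 * next_tweak multiplies the tweak by x in GF(2^128): each 64-bit
	 * half is doubled, the carry out of the low half is propagated into
	 * the high half, and a carry out of bit 127 is folded back in as
	 * 0x87 (x^7 + x^2 + x + 1). xts_load_mask builds the {0x1, 0x87}
	 * constant this relies on.
	 */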
	.macro		xts_load_mask, tmp
	movi		xtsmask.2s, #0x1
	movi		\tmp\().2s, #0x87
	uzp1		xtsmask.4s, xtsmask.4s, \tmp\().4s
	.endm

AES_FUNC_START(aes_xts_encrypt)
	stp		x29, x30, [sp, #-16]!
	mov		x29, sp

	ld1		{v4.16b}, [x6]
	xts_load_mask	v8
	cbz		w7, .Lxtsencnotfirst

	enc_prepare	w3, x5, x8
	xts_cts_skip_tw	w7, .LxtsencNx
	encrypt_block	v4, w3, x5, x8, w7		/* first tweak */
	enc_switch_key	w3, x2, x8
	b		.LxtsencNx

.Lxtsencnotfirst:
	enc_prepare	w3, x2, x8
.LxtsencloopNx:
	next_tweak	v4, v4, v8
.LxtsencNx:
	subs		w4, w4, #64
	bmi		.Lxtsenc1x
	ld1		{v0.16b-v3.16b}, [x1], #64	/* get 4 pt blocks */
	next_tweak	v5, v4, v8
	eor		v0.16b, v0.16b, v4.16b
	next_tweak	v6, v5, v8
	eor		v1.16b, v1.16b, v5.16b
	eor		v2.16b, v2.16b, v6.16b
	next_tweak	v7, v6, v8
	eor		v3.16b, v3.16b, v7.16b
	bl		aes_encrypt_block4x
	eor		v3.16b, v3.16b, v7.16b
	eor		v0.16b, v0.16b, v4.16b
	eor		v1.16b, v1.16b, v5.16b
	eor		v2.16b, v2.16b, v6.16b
	st1		{v0.16b-v3.16b}, [x0], #64
	mov		v4.16b, v7.16b
	cbz		w4, .Lxtsencret
	xts_reload_mask	v8
	b		.LxtsencloopNx
.Lxtsenc1x:
	adds		w4, w4, #64
	beq		.Lxtsencout
	subs		w4, w4, #16
	bmi		.LxtsencctsNx
.Lxtsencloop:
	ld1		{v0.16b}, [x1], #16
.Lxtsencctsout:
	eor		v0.16b, v0.16b, v4.16b
	encrypt_block	v0, w3, x2, x8, w7
	eor		v0.16b, v0.16b, v4.16b
	cbz		w4, .Lxtsencout
	subs		w4, w4, #16
	next_tweak	v4, v4, v8
	bmi		.Lxtsenccts
	st1		{v0.16b}, [x0], #16
	b		.Lxtsencloop
.Lxtsencout:
	st1		{v0.16b}, [x0]
.Lxtsencret:
	st1		{v4.16b}, [x6]
	ldp		x29, x30, [sp], #16
	ret

.LxtsencctsNx:
	mov		v0.16b, v3.16b
	sub		x0, x0, #16
.Lxtsenccts:
	adr_l		x8, .Lcts_permute_table

	add		x1, x1, w4, sxtw	/* rewind input pointer */
	add		w4, w4, #16		/* # bytes in final block */
	add		x9, x8, #32
	add		x8, x8, x4
	sub		x9, x9, x4
	add		x4, x0, x4		/* output address of final block */

	ld1		{v1.16b}, [x1]		/* load final block */
	ld1		{v2.16b}, [x8]
	ld1		{v3.16b}, [x9]

	tbl		v2.16b, {v0.16b}, v2.16b
	tbx		v0.16b, {v1.16b}, v3.16b
	st1		{v2.16b}, [x4]		/* overlapping stores */
	mov		w4, wzr
	b		.Lxtsencctsout
AES_FUNC_END(aes_xts_encrypt)

AES_FUNC_START(aes_xts_decrypt)
	stp		x29, x30, [sp, #-16]!
	mov		x29, sp

	/* subtract 16 bytes if we are doing CTS */
	sub		w8, w4, #0x10
	tst		w4, #0xf
	csel		w4, w4, w8, eq

	ld1		{v4.16b}, [x6]
	xts_load_mask	v8
	xts_cts_skip_tw	w7, .Lxtsdecskiptw
	cbz		w7, .Lxtsdecnotfirst

	enc_prepare	w3, x5, x8
	encrypt_block	v4, w3, x5, x8, w7		/* first tweak */
.Lxtsdecskiptw:
	dec_prepare	w3, x2, x8
	b		.LxtsdecNx

.Lxtsdecnotfirst:
	dec_prepare	w3, x2, x8
.LxtsdecloopNx:
	next_tweak	v4, v4, v8
.LxtsdecNx:
	subs		w4, w4, #64
	bmi		.Lxtsdec1x
	ld1		{v0.16b-v3.16b}, [x1], #64	/* get 4 ct blocks */
	next_tweak	v5, v4, v8
	eor		v0.16b, v0.16b, v4.16b
	next_tweak	v6, v5, v8
	eor		v1.16b, v1.16b, v5.16b
	eor		v2.16b, v2.16b, v6.16b
	next_tweak	v7, v6, v8
	eor		v3.16b, v3.16b, v7.16b
	bl		aes_decrypt_block4x
	eor		v3.16b, v3.16b, v7.16b
	eor		v0.16b, v0.16b, v4.16b
	eor		v1.16b, v1.16b, v5.16b
	eor		v2.16b, v2.16b, v6.16b
	st1		{v0.16b-v3.16b}, [x0], #64
	mov		v4.16b, v7.16b
	cbz		w4, .Lxtsdecout
	xts_reload_mask	v8
	b		.LxtsdecloopNx
.Lxtsdec1x:
	adds		w4, w4, #64
	beq		.Lxtsdecout
	subs		w4, w4, #16
.Lxtsdecloop:
	ld1		{v0.16b}, [x1], #16
	bmi		.Lxtsdeccts
.Lxtsdecctsout:
	eor		v0.16b, v0.16b, v4.16b
	decrypt_block	v0, w3, x2, x8, w7
	eor		v0.16b, v0.16b, v4.16b
	st1		{v0.16b}, [x0], #16
	cbz		w4, .Lxtsdecout
	subs		w4, w4, #16
	next_tweak	v4, v4, v8
	b		.Lxtsdecloop
.Lxtsdecout:
	st1		{v4.16b}, [x6]
	ldp		x29, x30, [sp], #16
	ret

.Lxtsdeccts:
	adr_l		x8, .Lcts_permute_table

	add		x1, x1, w4, sxtw	/* rewind input pointer */
	add		w4, w4, #16		/* # bytes in final block */
	add		x9, x8, #32
	add		x8, x8, x4
	sub		x9, x9, x4
	add		x4, x0, x4		/* output address of final block */

	next_tweak	v5, v4, v8

	ld1		{v1.16b}, [x1]		/* load final block */
	ld1		{v2.16b}, [x8]
	ld1		{v3.16b}, [x9]

	eor		v0.16b, v0.16b, v5.16b
	decrypt_block	v0, w3, x2, x8, w7
	eor		v0.16b, v0.16b, v5.16b

	tbl		v2.16b, {v0.16b}, v2.16b
	tbx		v0.16b, {v1.16b}, v3.16b

	st1		{v2.16b}, [x4]		/* overlapping stores */
	mov		w4, wzr
	b		.Lxtsdecctsout
AES_FUNC_END(aes_xts_decrypt)

	/*
	 * aes_mac_update(u8 const in[], u32 const rk[], int rounds,
	 *		  int blocks, u8 dg[], int enc_before, int enc_after)
	 */
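	/*
	 * Shared core of the CBC-MAC based algorithms: dg[] is xored with
	 * each input block and re-encrypted. enc_before requests an extra
	 * encryption of dg before the first block, enc_after one after the
	 * last. The return value in w0 is the number of blocks still to be
	 * processed, which is nonzero only when the routine yielded early
	 * via cond_yield.
	 */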
AES_FUNC_START(aes_mac_update)
	ld1		{v0.16b}, [x4]			/* get dg */
	enc_prepare	w2, x1, x7
	cbz		w5, .Lmacloop4x

	encrypt_block	v0, w2, x1, x7, w8

.Lmacloop4x:
	subs		w3, w3, #4
	bmi		.Lmac1x
	ld1		{v1.16b-v4.16b}, [x0], #64	/* get next pt block */
	eor		v0.16b, v0.16b, v1.16b		/* ..and xor with dg */
	encrypt_block	v0, w2, x1, x7, w8
	eor		v0.16b, v0.16b, v2.16b
	encrypt_block	v0, w2, x1, x7, w8
	eor		v0.16b, v0.16b, v3.16b
	encrypt_block	v0, w2, x1, x7, w8
	eor		v0.16b, v0.16b, v4.16b
	cmp		w3, wzr
	csinv		x5, x6, xzr, eq
	cbz		w5, .Lmacout
	encrypt_block	v0, w2, x1, x7, w8
	st1		{v0.16b}, [x4]			/* return dg */
	cond_yield	.Lmacout, x7, x8
	b		.Lmacloop4x
.Lmac1x:
	add		w3, w3, #4
.Lmacloop:
	cbz		w3, .Lmacout
	ld1		{v1.16b}, [x0], #16		/* get next pt block */
	eor		v0.16b, v0.16b, v1.16b		/* ..and xor with dg */

	subs		w3, w3, #1
	csinv		x5, x6, xzr, eq
	cbz		w5, .Lmacout

.Lmacenc:
	encrypt_block	v0, w2, x1, x7, w8
	b		.Lmacloop

.Lmacout:
	st1		{v0.16b}, [x4]			/* return dg */
	mov		w0, w3
	ret
AES_FUNC_END(aes_mac_update)