/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * aes-ce-core.S - AES in CBC/CTR/XTS mode using ARMv8 Crypto Extensions
 *
 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

	.text
	.arch		armv8-a
	.fpu		crypto-neon-fp-armv8
	.align		3
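	/*
	 * The enc_round/dec_round macros below implement one full AES round
	 * each: AESE.8 performs AddRoundKey + SubBytes + ShiftRows, and
	 * AESMC.8 performs MixColumns (AESD.8/AESIMC.8 are the inverse
	 * counterparts). Keeping each AESE/AESMC (and AESD/AESIMC) pair
	 * adjacent also allows cores that fuse these instruction pairs to
	 * execute them back to back.
	 */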
	.macro		enc_round, state, key
	aese.8		\state, \key
	aesmc.8		\state, \state
	.endm

	.macro		dec_round, state, key
	aesd.8		\state, \key
	aesimc.8	\state, \state
	.endm

	.macro		enc_dround, key1, key2
	enc_round	q0, \key1
	enc_round	q0, \key2
	.endm

	.macro		dec_dround, key1, key2
	dec_round	q0, \key1
	dec_round	q0, \key2
	.endm

	.macro		enc_fround, key1, key2, key3
	enc_round	q0, \key1
	aese.8		q0, \key2
	veor		q0, q0, \key3
	.endm

	.macro		dec_fround, key1, key2, key3
	dec_round	q0, \key1
	aesd.8		q0, \key2
	veor		q0, q0, \key3
	.endm

	.macro		enc_dround_4x, key1, key2
	enc_round	q0, \key1
	enc_round	q1, \key1
	enc_round	q2, \key1
	enc_round	q3, \key1
	enc_round	q0, \key2
	enc_round	q1, \key2
	enc_round	q2, \key2
	enc_round	q3, \key2
	.endm

	.macro		dec_dround_4x, key1, key2
	dec_round	q0, \key1
	dec_round	q1, \key1
	dec_round	q2, \key1
	dec_round	q3, \key1
	dec_round	q0, \key2
	dec_round	q1, \key2
	dec_round	q2, \key2
	dec_round	q3, \key2
	.endm

	.macro		enc_fround_4x, key1, key2, key3
	enc_round	q0, \key1
	enc_round	q1, \key1
	enc_round	q2, \key1
	enc_round	q3, \key1
	aese.8		q0, \key2
	aese.8		q1, \key2
	aese.8		q2, \key2
	aese.8		q3, \key2
	veor		q0, q0, \key3
	veor		q1, q1, \key3
	veor		q2, q2, \key3
	veor		q3, q3, \key3
	.endm

	.macro		dec_fround_4x, key1, key2, key3
	dec_round	q0, \key1
	dec_round	q1, \key1
	dec_round	q2, \key1
	dec_round	q3, \key1
	aesd.8		q0, \key2
	aesd.8		q1, \key2
	aesd.8		q2, \key2
	aesd.8		q3, \key2
	veor		q0, q0, \key3
	veor		q1, q1, \key3
	veor		q2, q2, \key3
	veor		q3, q3, \key3
	.endm
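	/*
	 * do_block runs the rounds that follow the preloaded q8/q9 keys,
	 * dispatching on the round count in r3: blo (r3 < 12) takes the
	 * 10-round AES-128 exit, beq (r3 == 12) the 12-round AES-192 exit,
	 * and the fall-through path handles 14-round AES-256. Round keys
	 * are streamed from [ip] two at a time, alternating between the
	 * q10/q11 and q12/q13 pairs.
	 */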
	.macro		do_block, dround, fround
	cmp		r3, #12			@ which key size?
	vld1.32		{q10-q11}, [ip]!
	\dround		q8, q9
	vld1.32		{q12-q13}, [ip]!
	\dround		q10, q11
	vld1.32		{q10-q11}, [ip]!
	\dround		q12, q13
	vld1.32		{q12-q13}, [ip]!
	\dround		q10, q11
	blo		0f			@ AES-128: 10 rounds
	vld1.32		{q10-q11}, [ip]!
	\dround		q12, q13
	beq		1f			@ AES-192: 12 rounds
	vld1.32		{q12-q13}, [ip]
	\dround		q10, q11
0:	\fround		q12, q13, q14
	bx		lr

1:	\fround		q10, q11, q14
	bx		lr
	.endm
	/*
	 * Internal, non-AAPCS compliant functions that implement the core AES
	 * transforms. These should preserve all registers except q0 - q3 and ip
	 * Arguments:
	 *   q0        : first in/output block
	 *   q1        : second in/output block (_4x version only)
	 *   q2        : third in/output block (_4x version only)
	 *   q3        : fourth in/output block (_4x version only)
	 *   q8        : first round key
	 *   q9        : second round key
	 *   q14       : final round key
	 *   r2        : address of round key array
	 *   r3        : number of rounds
	 */
	.align		6
aes_encrypt:
	add		ip, r2, #32		@ 3rd round key
.Laes_encrypt_tweak:
	do_block	enc_dround, enc_fround
ENDPROC(aes_encrypt)

	.align		6
aes_decrypt:
	add		ip, r2, #32		@ 3rd round key
	do_block	dec_dround, dec_fround
ENDPROC(aes_decrypt)

	.align		6
aes_encrypt_4x:
	add		ip, r2, #32		@ 3rd round key
	do_block	enc_dround_4x, enc_fround_4x
ENDPROC(aes_encrypt_4x)

	.align		6
aes_decrypt_4x:
	add		ip, r2, #32		@ 3rd round key
	do_block	dec_dround_4x, dec_fround_4x
ENDPROC(aes_decrypt_4x)

	.macro		prepare_key, rk, rounds
	add		ip, \rk, \rounds, lsl #4
	vld1.32		{q8-q9}, [\rk]		@ load first 2 round keys
	vld1.32		{q14}, [ip]		@ load last round key
	.endm
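	/*
	 * Each round key is 16 bytes, so the last round key lives at
	 * rk + rounds * 16 - e.g. at offset 160 for the 10 rounds of
	 * AES-128 - which is what the "lsl #4" above computes.
	 */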
	/*
	 * aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[], int rounds,
	 *		   int blocks)
	 * aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[], int rounds,
	 *		   int blocks)
	 */
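	/*
	 * As a rough usage sketch (an illustration, not a copy of the
	 * actual glue code), a C caller would declare the symbol and
	 * bracket the call with kernel_neon_begin()/kernel_neon_end(),
	 * since these routines clobber NEON state:
	 *
	 *	asmlinkage void ce_aes_ecb_encrypt(u8 out[], u8 const in[],
	 *					   u32 const rk[], int rounds,
	 *					   int blocks);
	 *
	 *	kernel_neon_begin();
	 *	ce_aes_ecb_encrypt(dst, src, ctx->key_enc, rounds, blocks);
	 *	kernel_neon_end();
	 *
	 * ctx->key_enc here stands in for the caller's expanded key
	 * schedule (struct crypto_aes_ctx in the kernel's AES code).
	 */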
ENTRY(ce_aes_ecb_encrypt)
	push		{r4, lr}
	ldr		r4, [sp, #8]
	prepare_key	r2, r3
.Lecbencloop4x:
	subs		r4, r4, #4
	bmi		.Lecbenc1x
	vld1.8		{q0-q1}, [r1]!
	vld1.8		{q2-q3}, [r1]!
	bl		aes_encrypt_4x
	vst1.8		{q0-q1}, [r0]!
	vst1.8		{q2-q3}, [r0]!
	b		.Lecbencloop4x
.Lecbenc1x:
	adds		r4, r4, #4
	beq		.Lecbencout
.Lecbencloop:
	vld1.8		{q0}, [r1]!
	bl		aes_encrypt
	vst1.8		{q0}, [r0]!
	subs		r4, r4, #1
	bne		.Lecbencloop
.Lecbencout:
	pop		{r4, pc}
ENDPROC(ce_aes_ecb_encrypt)

ENTRY(ce_aes_ecb_decrypt)
	push		{r4, lr}
	ldr		r4, [sp, #8]
	prepare_key	r2, r3
.Lecbdecloop4x:
	subs		r4, r4, #4
	bmi		.Lecbdec1x
	vld1.8		{q0-q1}, [r1]!
	vld1.8		{q2-q3}, [r1]!
	bl		aes_decrypt_4x
	vst1.8		{q0-q1}, [r0]!
	vst1.8		{q2-q3}, [r0]!
	b		.Lecbdecloop4x
.Lecbdec1x:
	adds		r4, r4, #4
	beq		.Lecbdecout
.Lecbdecloop:
	vld1.8		{q0}, [r1]!
	bl		aes_decrypt
	vst1.8		{q0}, [r0]!
	subs		r4, r4, #1
	bne		.Lecbdecloop
.Lecbdecout:
	pop		{r4, pc}
ENDPROC(ce_aes_ecb_decrypt)
	/*
	 * aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[], int rounds,
	 *		   int blocks, u8 iv[])
	 * aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[], int rounds,
	 *		   int blocks, u8 iv[])
	 */
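	/*
	 * Note the asymmetry below: CBC encryption is inherently serial
	 * (each plaintext block is xor'ed with the previous ciphertext
	 * before being encrypted), so only the decrypt side gets a 4-way
	 * interleaved fast path.
	 */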
ENTRY(ce_aes_cbc_encrypt)
	push		{r4-r6, lr}
	ldrd		r4, r5, [sp, #16]
	vld1.8		{q0}, [r5]
	prepare_key	r2, r3
.Lcbcencloop:
	vld1.8		{q1}, [r1]!		@ get next pt block
	veor		q0, q0, q1		@ ...and xor with iv
	bl		aes_encrypt
	vst1.8		{q0}, [r0]!
	subs		r4, r4, #1
	bne		.Lcbcencloop
	vst1.8		{q0}, [r5]
	pop		{r4-r6, pc}
ENDPROC(ce_aes_cbc_encrypt)

ENTRY(ce_aes_cbc_decrypt)
	push		{r4-r6, lr}
	ldrd		r4, r5, [sp, #16]
	vld1.8		{q15}, [r5]		@ keep iv in q15
	prepare_key	r2, r3
.Lcbcdecloop4x:
	subs		r4, r4, #4
	bmi		.Lcbcdec1x
	vld1.8		{q0-q1}, [r1]!
	vld1.8		{q2-q3}, [r1]!
	vmov		q4, q0
	vmov		q5, q1
	vmov		q6, q2
	vmov		q7, q3
	bl		aes_decrypt_4x
	veor		q0, q0, q15
	veor		q1, q1, q4
	veor		q2, q2, q5
	veor		q3, q3, q6
	vmov		q15, q7
	vst1.8		{q0-q1}, [r0]!
	vst1.8		{q2-q3}, [r0]!
	b		.Lcbcdecloop4x
.Lcbcdec1x:
	adds		r4, r4, #4
	beq		.Lcbcdecout
	vmov		q6, q14			@ preserve last round key
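	/*
	 * Folding trick for the 1x loop: the final round (\fround) ends
	 * with "veor q0, q0, q14", so xor'ing the previous ciphertext into
	 * the last round key ahead of time makes aes_decrypt perform the
	 * CBC unchaining step for free.
	 */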
.Lcbcdecloop:
	vld1.8		{q0}, [r1]!		@ get next ct block
	veor		q14, q15, q6		@ combine prev ct with last key
	vmov		q15, q0
	bl		aes_decrypt
	vst1.8		{q0}, [r0]!
	subs		r4, r4, #1
	bne		.Lcbcdecloop
.Lcbcdecout:
	vst1.8		{q15}, [r5]		@ return next iv
	pop		{r4-r6, pc}
ENDPROC(ce_aes_cbc_decrypt)
	/*
	 * ce_aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
	 *			  int rounds, int bytes, u8 const iv[])
	 * ce_aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
	 *			  int rounds, int bytes, u8 const iv[])
	 */
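	/*
	 * Ciphertext stealing: these entry points handle the final two
	 * blocks of a message whose length is not a multiple of 16. With
	 * r4 = bytes - 16, loading the permute table at offset r4 and at
	 * offset 32 - r4 yields the two vtbl/vtbx masks that split and
	 * recombine the overlapping blocks; e.g. for bytes = 20, r4 = 4,
	 * so the masks are taken at offsets 4 and 28 of
	 * .Lcts_permute_table.
	 */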
ENTRY(ce_aes_cbc_cts_encrypt)
	push		{r4-r6, lr}
	ldrd		r4, r5, [sp, #16]

	movw		ip, :lower16:.Lcts_permute_table
	movt		ip, :upper16:.Lcts_permute_table
	sub		r4, r4, #16
	add		lr, ip, #32
	add		ip, ip, r4
	sub		lr, lr, r4
	vld1.8		{q5}, [ip]
	vld1.8		{q6}, [lr]

	add		ip, r1, r4
	vld1.8		{q0}, [r1]		@ overlapping loads
	vld1.8		{q3}, [ip]

	vld1.8		{q1}, [r5]		@ get iv
	prepare_key	r2, r3

	veor		q0, q0, q1		@ xor with iv
	bl		aes_encrypt

	vtbl.8		d4, {d0-d1}, d10
	vtbl.8		d5, {d0-d1}, d11
	vtbl.8		d2, {d6-d7}, d12
	vtbl.8		d3, {d6-d7}, d13

	veor		q0, q0, q1
	bl		aes_encrypt

	add		r4, r0, r4
	vst1.8		{q2}, [r4]		@ overlapping stores
	vst1.8		{q0}, [r0]
	pop		{r4-r6, pc}
ENDPROC(ce_aes_cbc_cts_encrypt)

ENTRY(ce_aes_cbc_cts_decrypt)
	push		{r4-r6, lr}
	ldrd		r4, r5, [sp, #16]

	movw		ip, :lower16:.Lcts_permute_table
	movt		ip, :upper16:.Lcts_permute_table
	sub		r4, r4, #16
	add		lr, ip, #32
	add		ip, ip, r4
	sub		lr, lr, r4
	vld1.8		{q5}, [ip]
	vld1.8		{q6}, [lr]

	add		ip, r1, r4
	vld1.8		{q0}, [r1]		@ overlapping loads
	vld1.8		{q1}, [ip]

	vld1.8		{q3}, [r5]		@ get iv
	prepare_key	r2, r3

	bl		aes_decrypt

	vtbl.8		d4, {d0-d1}, d10
	vtbl.8		d5, {d0-d1}, d11
	vtbx.8		d0, {d2-d3}, d12
	vtbx.8		d1, {d2-d3}, d13

	veor		q1, q1, q2
	bl		aes_decrypt
	veor		q0, q0, q3		@ xor with iv

	add		r4, r0, r4
	vst1.8		{q1}, [r4]		@ overlapping stores
	vst1.8		{q0}, [r0]
	pop		{r4-r6, pc}
ENDPROC(ce_aes_cbc_cts_decrypt)
	/*
	 * aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[], int rounds,
	 *		   int blocks, u8 ctr[])
	 */
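	/*
	 * The counter block is big endian. The low 32 counter bits are kept
	 * byte-swapped into native order in r6 so they can be incremented
	 * with plain adds; "cmn r6, r4" below checks whether processing r4
	 * blocks would overflow those 32 bits, in which case the code falls
	 * back to the 1x loop, which can ripple the carry into the upper
	 * counter words.
	 */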
ENTRY(ce_aes_ctr_encrypt)
	push		{r4-r6, lr}
	ldrd		r4, r5, [sp, #16]
	vld1.8		{q7}, [r5]		@ load ctr
	prepare_key	r2, r3
	vmov		r6, s31			@ keep swabbed ctr in r6
	rev		r6, r6
	cmn		r6, r4			@ 32 bit overflow?
	bcs		.Lctrloop
.Lctrloop4x:
	subs		r4, r4, #4
	bmi		.Lctr1x

	/*
	 * NOTE: the sequence below has been carefully tweaked to avoid
	 * a silicon erratum that exists in Cortex-A57 (#1742098) and
	 * Cortex-A72 (#1655431) cores, where AESE/AESMC instruction pairs
	 * may produce an incorrect result if they take their input from a
	 * register of which a single 32-bit lane has been updated the last
	 * time it was modified. To work around this, the lanes of registers
	 * q0-q3 below are not manipulated individually, and the different
	 * counter values are prepared by successive manipulations of q7.
	 */
	add		ip, r6, #1
	vmov		q0, q7
	rev		ip, ip
	add		lr, r6, #2
	vmov		s31, ip			@ set lane 3 of q1 via q7
	add		ip, r6, #3
	rev		lr, lr
	vmov		q1, q7
	vmov		s31, lr			@ set lane 3 of q2 via q7
	rev		ip, ip
	vmov		q2, q7
	vmov		s31, ip			@ set lane 3 of q3 via q7
	add		r6, r6, #4
	vmov		q3, q7

	vld1.8		{q4-q5}, [r1]!
	vld1.8		{q6}, [r1]!
	vld1.8		{q15}, [r1]!
	bl		aes_encrypt_4x
	veor		q0, q0, q4
	veor		q1, q1, q5
	veor		q2, q2, q6
	veor		q3, q3, q15
	rev		ip, r6
	vst1.8		{q0-q1}, [r0]!
	vst1.8		{q2-q3}, [r0]!
	vmov		s31, ip
	b		.Lctrloop4x
.Lctr1x:
	adds		r4, r4, #4
	beq		.Lctrout
.Lctrloop:
	vmov		q0, q7
	bl		aes_encrypt
	adds		r6, r6, #1		@ increment BE ctr
	rev		ip, r6
	vmov		s31, ip
	bcs		.Lctrcarry

.Lctrcarrydone:
	subs		r4, r4, #1
	bmi		.Lctrtailblock		@ blocks < 0 means tail block
	vld1.8		{q3}, [r1]!
	veor		q3, q0, q3
	vst1.8		{q3}, [r0]!
	bne		.Lctrloop

.Lctrout:
	vst1.8		{q7}, [r5]		@ return next CTR value
	pop		{r4-r6, pc}

.Lctrtailblock:
	vst1.8		{q0}, [r0, :64]		@ return the key stream
	b		.Lctrout

.Lctrcarry:
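	/*
	 * The low 32 counter bits wrapped to zero: ripple the carry through
	 * the remaining big-endian counter words, from s30 (bits 32-63) up
	 * to s28 (bits 96-127), stopping as soon as an increment does not
	 * itself carry out.
	 */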
	.irp		sreg, s30, s29, s28
	vmov		ip, \sreg		@ load next word of ctr
	rev		ip, ip			@ ... to handle the carry
	adds		ip, ip, #1
	rev		ip, ip
	vmov		\sreg, ip
	bcc		.Lctrcarrydone
	.endr
	b		.Lctrcarrydone
ENDPROC(ce_aes_ctr_encrypt)
	/*
	 * aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[], int rounds,
	 *		   int bytes, u8 iv[], u32 const rk2[], int first)
	 * aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[], int rounds,
	 *		   int bytes, u8 iv[], u32 const rk2[], int first)
	 */
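	/*
	 * next_tweak computes the tweak for the next block, i.e. a
	 * multiplication by x in GF(2^128) modulo x^128 + x^7 + x^2 + x + 1.
	 * The arithmetic shift by #63 turns the top bit of each 64-bit half
	 * into an all-ones (or all-zeroes) mask; after masking with the
	 * constant in q15 (low half 1, high half 0x87, composed in
	 * ce_aes_xts_init) and swapping halves with vext, the final xor
	 * applies both the carry of bit 63 into bit 64 and the 0x87
	 * reduction when bit 127 falls off the top of the doubled
	 * (vadd.u64) value.
	 */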
	.macro		next_tweak, out, in, const, tmp
	vshr.s64	\tmp, \in, #63
	vand		\tmp, \tmp, \const
	vadd.u64	\out, \in, \in
	vext.8		\tmp, \tmp, \tmp, #8
	veor		\out, \out, \tmp
	.endm

ce_aes_xts_init:
	vmov.i32	d30, #0x87		@ compose tweak mask vector
	vmovl.u32	q15, d30
	vshr.u64	d30, d31, #7

	ldrd		r4, r5, [sp, #16]	@ load args
	ldr		r6, [sp, #28]
	vld1.8		{q0}, [r5]		@ load iv
	teq		r6, #1			@ start of a block?
	bxne		lr

	@ Encrypt the IV in q0 with the second AES key. This should only
	@ be done at the start of a block.
	ldr		r6, [sp, #24]		@ load AES key 2
	prepare_key	r6, r3
	add		ip, r6, #32		@ 3rd round key of key 2
	b		.Laes_encrypt_tweak	@ tail call
ENDPROC(ce_aes_xts_init)

ENTRY(ce_aes_xts_encrypt)
	push		{r4-r6, lr}

	bl		ce_aes_xts_init		@ run shared prologue
	prepare_key	r2, r3
	vmov		q4, q0

	teq		r6, #0			@ start of a block?
	bne		.Lxtsenc4x
.Lxtsencloop4x:
	next_tweak	q4, q4, q15, q10
.Lxtsenc4x:
	subs		r4, r4, #64
	bmi		.Lxtsenc1x
	vld1.8		{q0-q1}, [r1]!		@ get 4 pt blocks
	vld1.8		{q2-q3}, [r1]!
	next_tweak	q5, q4, q15, q10
	veor		q0, q0, q4
	next_tweak	q6, q5, q15, q10
	veor		q1, q1, q5
	next_tweak	q7, q6, q15, q10
	veor		q2, q2, q6
	veor		q3, q3, q7
	bl		aes_encrypt_4x
	veor		q0, q0, q4
	veor		q1, q1, q5
	veor		q2, q2, q6
	veor		q3, q3, q7
	vst1.8		{q0-q1}, [r0]!		@ write 4 ct blocks
	vst1.8		{q2-q3}, [r0]!
	vmov		q4, q7
	teq		r4, #0
	beq		.Lxtsencret
	b		.Lxtsencloop4x
.Lxtsenc1x:
	adds		r4, r4, #64
	beq		.Lxtsencout
	subs		r4, r4, #16
	bmi		.LxtsencctsNx
.Lxtsencloop:
	vld1.8		{q0}, [r1]!
.Lxtsencctsout:
	veor		q0, q0, q4
	bl		aes_encrypt
	veor		q0, q0, q4
	teq		r4, #0
	beq		.Lxtsencout
	subs		r4, r4, #16
	next_tweak	q4, q4, q15, q6
	bmi		.Lxtsenccts
	vst1.8		{q0}, [r0]!
	b		.Lxtsencloop
.Lxtsencout:
	vst1.8		{q0}, [r0]
.Lxtsencret:
	vst1.8		{q4}, [r5]
	pop		{r4-r6, pc}

.LxtsencctsNx:
	vmov		q0, q3
	sub		r0, r0, #16
.Lxtsenccts:
	movw		ip, :lower16:.Lcts_permute_table
	movt		ip, :upper16:.Lcts_permute_table

	add		r1, r1, r4		@ rewind input pointer
	add		r4, r4, #16		@ # bytes in final block
	add		lr, ip, #32
	add		ip, ip, r4
	sub		lr, lr, r4
	add		r4, r0, r4		@ output address of final block

	vld1.8		{q1}, [r1]		@ load final partial block
	vld1.8		{q2}, [ip]
	vld1.8		{q3}, [lr]

	vtbl.8		d4, {d0-d1}, d4
	vtbl.8		d5, {d0-d1}, d5
	vtbx.8		d0, {d2-d3}, d6
	vtbx.8		d1, {d2-d3}, d7

	vst1.8		{q2}, [r4]		@ overlapping stores
	mov		r4, #0
	b		.Lxtsencctsout
ENDPROC(ce_aes_xts_encrypt)

ENTRY(ce_aes_xts_decrypt)
	push		{r4-r6, lr}

	bl		ce_aes_xts_init		@ run shared prologue
	prepare_key	r2, r3
	vmov		q4, q0

	/* subtract 16 bytes if we are doing CTS */
	tst		r4, #0xf
	subne		r4, r4, #0x10

	teq		r6, #0			@ start of a block?
	bne		.Lxtsdec4x
.Lxtsdecloop4x:
	next_tweak	q4, q4, q15, q10
.Lxtsdec4x:
	subs		r4, r4, #64
	bmi		.Lxtsdec1x
	vld1.8		{q0-q1}, [r1]!		@ get 4 ct blocks
	vld1.8		{q2-q3}, [r1]!
	next_tweak	q5, q4, q15, q10
	veor		q0, q0, q4
	next_tweak	q6, q5, q15, q10
	veor		q1, q1, q5
	next_tweak	q7, q6, q15, q10
	veor		q2, q2, q6
	veor		q3, q3, q7
	bl		aes_decrypt_4x
	veor		q0, q0, q4
	veor		q1, q1, q5
	veor		q2, q2, q6
	veor		q3, q3, q7
	vst1.8		{q0-q1}, [r0]!		@ write 4 pt blocks
	vst1.8		{q2-q3}, [r0]!
	vmov		q4, q7
	teq		r4, #0
	beq		.Lxtsdecout
	b		.Lxtsdecloop4x
.Lxtsdec1x:
	adds		r4, r4, #64
	beq		.Lxtsdecout
	subs		r4, r4, #16
.Lxtsdecloop:
	vld1.8		{q0}, [r1]!
	bmi		.Lxtsdeccts
.Lxtsdecctsout:
	veor		q0, q0, q4
	bl		aes_decrypt
	veor		q0, q0, q4
	vst1.8		{q0}, [r0]!
	teq		r4, #0
	beq		.Lxtsdecout
	subs		r4, r4, #16
	next_tweak	q4, q4, q15, q6
	b		.Lxtsdecloop
.Lxtsdecout:
	vst1.8		{q4}, [r5]
	pop		{r4-r6, pc}

.Lxtsdeccts:
	movw		ip, :lower16:.Lcts_permute_table
	movt		ip, :upper16:.Lcts_permute_table

	add		r1, r1, r4		@ rewind input pointer
	add		r4, r4, #16		@ # bytes in final block
	add		lr, ip, #32
	add		ip, ip, r4
	sub		lr, lr, r4
	add		r4, r0, r4		@ output address of final block

	next_tweak	q5, q4, q15, q6

	vld1.8		{q1}, [r1]		@ load final partial block
	vld1.8		{q2}, [ip]
	vld1.8		{q3}, [lr]

	veor		q0, q0, q5
	bl		aes_decrypt
	veor		q0, q0, q5

	vtbl.8		d4, {d0-d1}, d4
	vtbl.8		d5, {d0-d1}, d5
	vtbx.8		d0, {d2-d3}, d6
	vtbx.8		d1, {d2-d3}, d7

	vst1.8		{q2}, [r4]		@ overlapping stores
	mov		r4, #0
	b		.Lxtsdecctsout
ENDPROC(ce_aes_xts_decrypt)
	/*
	 * u32 ce_aes_sub(u32 input) - use the aese instruction to perform the
	 *                             AES sbox substitution on each byte in
	 *                             'input'
	 */
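	/*
	 * This works because aese xors in the round key first, so zeroing
	 * q0 makes AddRoundKey a no-op, and with all four words of the
	 * state equal (vdup) the ShiftRows permutation leaves the state
	 * unchanged; lane 0 therefore ends up holding SubBytes(input).
	 */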
ENTRY(ce_aes_sub)
	vdup.32		q1, r0
	veor		q0, q0, q0
	aese.8		q0, q1
	vmov		r0, s0
	bx		lr
ENDPROC(ce_aes_sub)
	/*
	 * void ce_aes_invert(u8 *dst, u8 *src) - perform the Inverse MixColumns
	 *                                        operation on round key *src
	 */
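	/*
	 * This supports the "equivalent inverse cipher" key schedule from
	 * FIPS-197: decrypting with the dround/fround sequence above
	 * requires InvMixColumns to be applied to every round key except
	 * the first and the last.
	 */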
ENTRY(ce_aes_invert)
	vld1.32		{q0}, [r1]
	aesimc.8	q0, q0
	vst1.32		{q0}, [r0]
	bx		lr
ENDPROC(ce_aes_invert)
	.section	".rodata", "a"
	.align		6
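	/*
	 * 48-byte sliding window for the CTS permutation masks: a 16-byte
	 * load at offset n (0 < n < 16) yields 16 - n leading 0xff entries
	 * followed by the indices 0 .. n-1, and a load at offset 32 - n
	 * yields the complementary mask. vtbl returns 0 for 0xff entries
	 * while vtbx leaves the destination byte untouched, which is how
	 * the final partial block and the preceding full block are split
	 * and recombined.
	 */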
.Lcts_permute_table:
	.byte		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	.byte		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	.byte		 0x0,  0x1,  0x2,  0x3,  0x4,  0x5,  0x6,  0x7
	.byte		 0x8,  0x9,  0xa,  0xb,  0xc,  0xd,  0xe,  0xf
	.byte		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	.byte		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff