
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * aes-ce-ccm-core.S - AES-CCM transform for ARMv8 with Crypto Extensions
 *
 * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

	.text
	.arch	armv8-a+crypto

	/*
	 * void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
	 *			     u32 *macp, u8 const rk[], u32 rounds);
	 */
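	/*
	 * Per AAPCS64: x0 = mac, x1 = in, w2 = abytes, x3 = macp,
	 * x4 = rk, w5 = rounds. *macp holds the number of bytes of a
	 * partial block carried over from a previous call, so the
	 * authentication data may be fed in arbitrary-sized chunks.
	 */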
SYM_FUNC_START(ce_aes_ccm_auth_data)
	ldr	w8, [x3]			/* leftover from prev round? */
	ld1	{v0.16b}, [x0]			/* load mac */
	cbz	w8, 1f
	sub	w8, w8, #16
	eor	v1.16b, v1.16b, v1.16b
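	/*
	 * A previous call left 1..15 bytes behind: gather input into the
	 * zeroed staging block in v1 one byte at a time until the 16-byte
	 * block is complete (w8 counts up towards zero), then fold it
	 * into the mac.
	 */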
0:	ldrb	w7, [x1], #1			/* get 1 byte of input */
	subs	w2, w2, #1
	add	w8, w8, #1
	ins	v1.b[0], w7
	ext	v1.16b, v1.16b, v1.16b, #1	/* rotate in the input bytes */
	beq	8f				/* out of input? */
	cbnz	w8, 0b
	eor	v0.16b, v0.16b, v1.16b
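	/*
	 * rounds is 10, 12 or 14 for AES-128/192/256. The dispatch below
	 * enters the 3x-unrolled round loop at a different point for each
	 * key size so that the right number of round keys is preloaded
	 * into v3/v4/v5: AES-128 branches to 2f (minus flag set),
	 * AES-256 to 5f, and AES-192 falls through and jumps to 4f.
	 */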
1:	ld1	{v3.4s}, [x4]			/* load first round key */
	prfm	pldl1strm, [x1]
	cmp	w5, #12				/* which key size? */
	add	x6, x4, #16
	sub	w7, w5, #2			/* modified # of rounds */
	bmi	2f
	bne	5f
	mov	v5.16b, v3.16b
	b	4f
2:	mov	v4.16b, v3.16b
	ld1	{v5.4s}, [x6], #16		/* load 2nd round key */
3:	aese	v0.16b, v4.16b
	aesmc	v0.16b, v0.16b
4:	ld1	{v3.4s}, [x6], #16		/* load next round key */
	aese	v0.16b, v5.16b
	aesmc	v0.16b, v0.16b
5:	ld1	{v4.4s}, [x6], #16		/* load next round key */
	subs	w7, w7, #3
	aese	v0.16b, v3.16b
	aesmc	v0.16b, v0.16b
	ld1	{v5.4s}, [x6], #16		/* load next round key */
	bpl	3b
	aese	v0.16b, v4.16b
	subs	w2, w2, #16			/* last data? */
	eor	v0.16b, v0.16b, v5.16b		/* final round */
	bmi	6f
	ld1	{v1.16b}, [x1], #16		/* load next input block */
	eor	v0.16b, v0.16b, v1.16b		/* xor with mac */
	bne	1b
6:	st1	{v0.16b}, [x0]			/* store mac */
	beq	10f
	adds	w2, w2, #16
	beq	10f
	mov	w8, w2
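	/*
	 * Fewer than 16 bytes of input remain: XOR them into the mac one
	 * byte at a time, rotating v0 so byte 0 always holds the mac byte
	 * being updated; w8 records the partial count for the next call.
	 */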
7:	ldrb	w7, [x1], #1
	umov	w6, v0.b[0]
	eor	w6, w6, w7
	strb	w6, [x0], #1
	subs	w2, w2, #1
	beq	10f
	ext	v0.16b, v0.16b, v0.16b, #1	/* rotate out the mac bytes */
	b	7b
8:	cbz	w8, 91f
	mov	w7, w8
	add	w8, w8, #16
9:	ext	v1.16b, v1.16b, v1.16b, #1
	adds	w7, w7, #1
	bne	9b
91:	eor	v0.16b, v0.16b, v1.16b
	st1	{v0.16b}, [x0]
10:	str	w8, [x3]
	ret
SYM_FUNC_END(ce_aes_ccm_auth_data)

	/*
	 * void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u8 const rk[],
	 *			 u32 rounds);
	 */
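	/*
	 * Completes the CBC-MAC by encrypting the mac state one last time
	 * while encrypting the initial counter block in parallel, then
	 * XORs the two to produce the tag. Both encryptions stop short of
	 * the final AddRoundKey: XORing the same round key into both
	 * operands cancels out of mac ^ ctr.
	 */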
SYM_FUNC_START(ce_aes_ccm_final)
	ld1	{v3.4s}, [x2], #16		/* load first round key */
	ld1	{v0.16b}, [x0]			/* load mac */
	cmp	w3, #12				/* which key size? */
	sub	w3, w3, #2			/* modified # of rounds */
	ld1	{v1.16b}, [x1]			/* load 1st ctriv */
	bmi	0f
	bne	3f
	mov	v5.16b, v3.16b
	b	2f
0:	mov	v4.16b, v3.16b
1:	ld1	{v5.4s}, [x2], #16		/* load next round key */
	aese	v0.16b, v4.16b
	aesmc	v0.16b, v0.16b
	aese	v1.16b, v4.16b
	aesmc	v1.16b, v1.16b
2:	ld1	{v3.4s}, [x2], #16		/* load next round key */
	aese	v0.16b, v5.16b
	aesmc	v0.16b, v0.16b
	aese	v1.16b, v5.16b
	aesmc	v1.16b, v1.16b
3:	ld1	{v4.4s}, [x2], #16		/* load next round key */
	subs	w3, w3, #3
	aese	v0.16b, v3.16b
	aesmc	v0.16b, v0.16b
	aese	v1.16b, v3.16b
	aesmc	v1.16b, v1.16b
	bpl	1b
	aese	v0.16b, v4.16b
	aese	v1.16b, v4.16b
	/* final round key cancels out */
	eor	v0.16b, v0.16b, v1.16b		/* en-/decrypt the mac */
	st1	{v0.16b}, [x0]			/* store result */
	ret
SYM_FUNC_END(ce_aes_ccm_final)

	.macro	aes_ccm_do_crypt,enc
	ldr	x8, [x6, #8]			/* load lower ctr */
	ld1	{v0.16b}, [x5]			/* load mac */
CPU_LE(	rev	x8, x8			)	/* keep swabbed ctr in reg */
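	/*
	 * The low 64 bits of the counter live byte-reversed (i.e., in CPU
	 * order on little endian) in x8 so they can be bumped with a plain
	 * add; rev recreates the big-endian form before it is inserted
	 * into the counter block on each iteration.
	 */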
0:	/* outer loop */
	ld1	{v1.8b}, [x6]			/* load upper ctr */
	prfm	pldl1strm, [x1]
	add	x8, x8, #1
	rev	x9, x8
	cmp	w4, #12				/* which key size? */
	sub	w7, w4, #2			/* get modified # of rounds */
	ins	v1.d[1], x9			/* no carry in lower ctr */
	ld1	{v3.4s}, [x3]			/* load first round key */
	add	x10, x3, #16
	bmi	1f
	bne	4f
	mov	v5.16b, v3.16b
	b	3f
1:	mov	v4.16b, v3.16b
	ld1	{v5.4s}, [x10], #16		/* load 2nd round key */
2:	/* inner loop: 3 rounds, 2x interleaved */
	aese	v0.16b, v4.16b
	aesmc	v0.16b, v0.16b
	aese	v1.16b, v4.16b
	aesmc	v1.16b, v1.16b
3:	ld1	{v3.4s}, [x10], #16		/* load next round key */
	aese	v0.16b, v5.16b
	aesmc	v0.16b, v0.16b
	aese	v1.16b, v5.16b
	aesmc	v1.16b, v1.16b
4:	ld1	{v4.4s}, [x10], #16		/* load next round key */
	subs	w7, w7, #3
	aese	v0.16b, v3.16b
	aesmc	v0.16b, v0.16b
	aese	v1.16b, v3.16b
	aesmc	v1.16b, v1.16b
	ld1	{v5.4s}, [x10], #16		/* load next round key */
	bpl	2b
	aese	v0.16b, v4.16b
	aese	v1.16b, v4.16b
	subs	w2, w2, #16
	bmi	6f				/* partial block? */
	ld1	{v2.16b}, [x1], #16		/* load next input block */
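	/*
	 * v0 (mac) and v1 (crypted ctr) are both still one AddRoundKey
	 * (v5) short of a full encryption. In either arm below, v2 ends
	 * up holding pt ^ rk[last], so the single XOR into v0 completes
	 * the mac's final round and folds in the plaintext at once.
	 */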
	.if	\enc == 1
	eor	v2.16b, v2.16b, v5.16b		/* final round enc+mac */
	eor	v1.16b, v1.16b, v2.16b		/* xor with crypted ctr */
	.else
	eor	v2.16b, v2.16b, v1.16b		/* xor with crypted ctr */
	eor	v1.16b, v2.16b, v5.16b		/* final round enc */
	.endif
	eor	v0.16b, v0.16b, v2.16b		/* xor mac with pt ^ rk[last] */
	st1	{v1.16b}, [x0], #16		/* write output block */
	bne	0b
CPU_LE(	rev	x8, x8			)
	st1	{v0.16b}, [x5]			/* store mac */
	str	x8, [x6, #8]			/* store lsb end of ctr (BE) */
5:	ret

6:	eor	v0.16b, v0.16b, v5.16b		/* final round mac */
	eor	v1.16b, v1.16b, v5.16b		/* final round enc */
	st1	{v0.16b}, [x5]			/* store mac */
	add	w2, w2, #16			/* process partial tail block */
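	/*
	 * Tail loop: combine each remaining input byte with the keystream
	 * and fold it into the mac one byte at a time, rotating v0/v1 so
	 * byte 0 always holds the next mac/keystream byte. The mac is
	 * XORed with the plaintext byte for both encrypt and decrypt.
	 */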
7:	ldrb	w9, [x1], #1			/* get 1 byte of input */
	umov	w6, v1.b[0]			/* get top crypted ctr byte */
	umov	w7, v0.b[0]			/* get top mac byte */
	.if	\enc == 1
	eor	w7, w7, w9
	eor	w9, w9, w6
	.else
	eor	w9, w9, w6
	eor	w7, w7, w9
	.endif
	strb	w9, [x0], #1			/* store out byte */
	strb	w7, [x5], #1			/* store mac byte */
	subs	w2, w2, #1
	beq	5b
	ext	v0.16b, v0.16b, v0.16b, #1	/* shift out mac byte */
	ext	v1.16b, v1.16b, v1.16b, #1	/* shift out ctr byte */
	b	7b
	.endm

	/*
	 * void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
	 *			   u8 const rk[], u32 rounds, u8 mac[],
	 *			   u8 ctr[]);
	 * void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
	 *			   u8 const rk[], u32 rounds, u8 mac[],
	 *			   u8 ctr[]);
	 */
SYM_FUNC_START(ce_aes_ccm_encrypt)
	aes_ccm_do_crypt	1
SYM_FUNC_END(ce_aes_ccm_encrypt)

SYM_FUNC_START(ce_aes_ccm_decrypt)
	aes_ccm_do_crypt	0
SYM_FUNC_END(ce_aes_ccm_decrypt)
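
/*
 * Rough usage sketch in C, for illustration only; this is an assumption
 * about the calling sequence, not the actual kernel glue code
 * (aes-ce-ccm-glue.c). mac is seeded with the unencrypted B_0 block,
 * ctr holds counter block A_0 (the crypt routines pre-increment it, so
 * payload blocks consume A_1, A_2, ...), and rk/rounds come from the
 * expanded AES key:
 *
 *	u8 mac[16], ctr0[16], ctr[16];
 *	u32 macp = 0;
 *
 *	memcpy(ctr, ctr0, sizeof(ctr));
 *	kernel_neon_begin();
 *	ce_aes_ccm_auth_data(mac, aad, aad_len, &macp, rk, rounds);
 *	ce_aes_ccm_encrypt(out, in, len, rk, rounds, mac, ctr);
 *	ce_aes_ccm_final(mac, ctr0, rk, rounds);
 *	kernel_neon_end();	// mac now holds the tag T ^ S_0
 */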