chacha-scalar-core.S

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 Google, Inc.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

/*
 * Design notes:
 *
 * 16 registers would be needed to hold the state matrix, but only 14 are
 * available because 'sp' and 'pc' cannot be used. So we spill the elements
 * (x8, x9) to the stack and swap them out with (x10, x11). This adds one
 * 'ldrd' and one 'strd' instruction per round.
 *
 * All rotates are performed using the implicit rotate operand accepted by the
 * 'add' and 'eor' instructions. This is faster than using explicit rotate
 * instructions. To make this work, we allow the values in the second and last
 * rows of the ChaCha state matrix (rows 'b' and 'd') to temporarily have the
 * wrong rotation amount. The rotation amount is then fixed up just in time
 * when the values are used. 'brot' is the number of bits the values in row 'b'
 * need to be rotated right to arrive at the correct values, and 'drot'
 * similarly for row 'd'. (brot, drot) start out as (0, 0) but we make it such
 * that they end up as (25, 24) after every round.
 */
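
/*
 * As a reference for the deferred-rotation trick, here is a minimal C sketch
 * of one quarterround operating on values whose 'b' and 'd' words are stored
 * rotated right by 'brot' and 'drot'. It is illustrative only and not part of
 * this file; the function name chacha_qr_deferred() is made up, and ror32()
 * is assumed to be the usual 32-bit rotate-right helper (as in
 * <linux/bitops.h>, with u32 from <linux/types.h>):
 *
 *      static void chacha_qr_deferred(u32 *a, u32 *b, u32 *c, u32 *d,
 *                                     unsigned int brot, unsigned int drot)
 *      {
 *              // a += b; d ^= a; d = rol32(d, 16);
 *              *a += ror32(*b, brot);  *d = *a ^ ror32(*d, drot);  // drot -> 16
 *              // c += d; b ^= c; b = rol32(b, 12);
 *              *c += ror32(*d, 16);    *b = *c ^ ror32(*b, brot);  // brot -> 20
 *              // a += b; d ^= a; d = rol32(d, 8);
 *              *a += ror32(*b, 20);    *d = *a ^ ror32(*d, 16);    // drot -> 24
 *              // c += d; b ^= c; b = rol32(b, 7);
 *              *c += ror32(*d, 24);    *b = *c ^ ror32(*b, 20);    // brot -> 25
 *      }
 *
 * On exit the stored 'b' and 'd' words need a further rotate right by 25 and
 * 24 respectively to recover the true ChaCha values, which is exactly the
 * (brot, drot) == (25, 24) bookkeeping described above.
 */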

// ChaCha state registers
X0      .req    r0
X1      .req    r1
X2      .req    r2
X3      .req    r3
X4      .req    r4
X5      .req    r5
X6      .req    r6
X7      .req    r7
X8_X10  .req    r8      // shared by x8 and x10
X9_X11  .req    r9      // shared by x9 and x11
X12     .req    r10
X13     .req    r11
X14     .req    r12
X15     .req    r14

.macro __rev            out, in, t0, t1, t2
.if __LINUX_ARM_ARCH__ >= 6
        rev             \out, \in
.else
        lsl             \t0, \in, #24
        and             \t1, \in, #0xff00
        and             \t2, \in, #0xff0000
        orr             \out, \t0, \in, lsr #24
        orr             \out, \out, \t1, lsl #8
        orr             \out, \out, \t2, lsr #8
.endif
.endm

.macro _le32_bswap      x, t0, t1, t2
#ifdef __ARMEB__
        __rev           \x, \x, \t0, \t1, \t2
#endif
.endm

.macro _le32_bswap_4x   a, b, c, d, t0, t1, t2
        _le32_bswap     \a, \t0, \t1, \t2
        _le32_bswap     \b, \t0, \t1, \t2
        _le32_bswap     \c, \t0, \t1, \t2
        _le32_bswap     \d, \t0, \t1, \t2
.endm

.macro __ldrd           a, b, src, offset
#if __LINUX_ARM_ARCH__ >= 6
        ldrd            \a, \b, [\src, #\offset]
#else
        ldr             \a, [\src, #\offset]
        ldr             \b, [\src, #\offset + 4]
#endif
.endm

.macro __strd           a, b, dst, offset
#if __LINUX_ARM_ARCH__ >= 6
        strd            \a, \b, [\dst, #\offset]
#else
        str             \a, [\dst, #\offset]
        str             \b, [\dst, #\offset + 4]
#endif
.endm
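
// _halfround does two independent ChaCha quarterrounds, (a1, b1, c1, d1) and
// (a2, b2, c2, d2), interleaved. Each 'add' and 'eor' folds the pending
// rotation of its 'b' or 'd' operand into the instruction via the implicit
// rotate operand, and the comments track how 'brot' and 'drot' evolve.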
.macro _halfround       a1, b1, c1, d1, a2, b2, c2, d2

        // a += b; d ^= a; d = rol(d, 16);
        add             \a1, \a1, \b1, ror #brot
        add             \a2, \a2, \b2, ror #brot
        eor             \d1, \a1, \d1, ror #drot
        eor             \d2, \a2, \d2, ror #drot
        // drot == 32 - 16 == 16

        // c += d; b ^= c; b = rol(b, 12);
        add             \c1, \c1, \d1, ror #16
        add             \c2, \c2, \d2, ror #16
        eor             \b1, \c1, \b1, ror #brot
        eor             \b2, \c2, \b2, ror #brot
        // brot == 32 - 12 == 20

        // a += b; d ^= a; d = rol(d, 8);
        add             \a1, \a1, \b1, ror #20
        add             \a2, \a2, \b2, ror #20
        eor             \d1, \a1, \d1, ror #16
        eor             \d2, \a2, \d2, ror #16
        // drot == 32 - 8 == 24

        // c += d; b ^= c; b = rol(b, 7);
        add             \c1, \c1, \d1, ror #24
        add             \c2, \c2, \d2, ror #24
        eor             \b1, \c1, \b1, ror #20
        eor             \b2, \c2, \b2, ror #20
        // brot == 32 - 7 == 25
.endm
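
// The ChaCha state is viewed as a 4x4 matrix of 32-bit words:
//
//      x0  x1  x2  x3
//      x4  x5  x6  x7
//      x8  x9  x10 x11
//      x12 x13 x14 x15
//
// A double round is a round over the four columns of this matrix followed by
// a round over its four diagonals.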
.macro _doubleround

        // column round

        // quarterrounds: (x0, x4, x8, x12) and (x1, x5, x9, x13)
        _halfround      X0, X4, X8_X10, X12, X1, X5, X9_X11, X13

        // save (x8, x9); restore (x10, x11)
        __strd          X8_X10, X9_X11, sp, 0
        __ldrd          X8_X10, X9_X11, sp, 8

        // quarterrounds: (x2, x6, x10, x14) and (x3, x7, x11, x15)
        _halfround      X2, X6, X8_X10, X14, X3, X7, X9_X11, X15

        .set brot, 25
        .set drot, 24

        // diagonal round

        // quarterrounds: (x0, x5, x10, x15) and (x1, x6, x11, x12)
        _halfround      X0, X5, X8_X10, X15, X1, X6, X9_X11, X12

        // save (x10, x11); restore (x8, x9)
        __strd          X8_X10, X9_X11, sp, 8
        __ldrd          X8_X10, X9_X11, sp, 0

        // quarterrounds: (x2, x7, x8, x13) and (x3, x4, x9, x14)
        _halfround      X2, X7, X8_X10, X13, X3, X4, X9_X11, X14
.endm

.macro _chacha_permute  nrounds
        .set brot, 0
        .set drot, 0
        .rept \nrounds / 2
        _doubleround
        .endr
.endm
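
// _chacha expands to the main block-processing loop for \nrounds rounds:
// permute the state, add the original state words back in, XOR the resulting
// 64-byte keystream block with the input, and advance the block counter.
// Full 64-byte blocks with 4-byte-aligned IN and OUT take the ldmia/stmia
// fast path; the final partial block, or misaligned buffers, fall back to
// .Lxor_slowpath, which writes the keystream to the stack and XORs it in a
// word or a byte at a time.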
.macro _chacha          nrounds

.Lnext_block\@:
        // Stack: unused0-unused1 x10-x11 x0-x15 OUT IN LEN
        // Registers contain x0-x9,x12-x15.

        // Do the core ChaCha permutation to update x0-x15.
        _chacha_permute \nrounds

        add             sp, #8
        // Stack: x10-x11 orig_x0-orig_x15 OUT IN LEN
        // Registers contain x0-x9,x12-x15.
        // x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'.

        // Free up some registers (r8-r12,r14) by pushing (x8-x9,x12-x15).
        push            {X8_X10, X9_X11, X12, X13, X14, X15}

        // Load (OUT, IN, LEN).
        ldr             r14, [sp, #96]
        ldr             r12, [sp, #100]
        ldr             r11, [sp, #104]

        orr             r10, r14, r12

        // Use slow path if fewer than 64 bytes remain.
        cmp             r11, #64
        blt             .Lxor_slowpath\@

        // Use slow path if IN and/or OUT isn't 4-byte aligned. Needed even on
        // ARMv6+, since ldmia and stmia (used below) still require alignment.
        tst             r10, #3
        bne             .Lxor_slowpath\@

        // Fast path: XOR 64 bytes of aligned data.

        // Stack: x8-x9 x12-x15 x10-x11 orig_x0-orig_x15 OUT IN LEN
        // Registers: r0-r7 are x0-x7; r8-r11 are free; r12 is IN; r14 is OUT.
        // x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'.

        // x0-x3
        __ldrd          r8, r9, sp, 32
        __ldrd          r10, r11, sp, 40
        add             X0, X0, r8
        add             X1, X1, r9
        add             X2, X2, r10
        add             X3, X3, r11
        _le32_bswap_4x  X0, X1, X2, X3, r8, r9, r10
        ldmia           r12!, {r8-r11}
        eor             X0, X0, r8
        eor             X1, X1, r9
        eor             X2, X2, r10
        eor             X3, X3, r11
        stmia           r14!, {X0-X3}

        // x4-x7
        __ldrd          r8, r9, sp, 48
        __ldrd          r10, r11, sp, 56
        add             X4, r8, X4, ror #brot
        add             X5, r9, X5, ror #brot
        ldmia           r12!, {X0-X3}
        add             X6, r10, X6, ror #brot
        add             X7, r11, X7, ror #brot
        _le32_bswap_4x  X4, X5, X6, X7, r8, r9, r10
        eor             X4, X4, X0
        eor             X5, X5, X1
        eor             X6, X6, X2
        eor             X7, X7, X3
        stmia           r14!, {X4-X7}

        // x8-x15
        pop             {r0-r7}                 // (x8-x9,x12-x15,x10-x11)
        __ldrd          r8, r9, sp, 32
        __ldrd          r10, r11, sp, 40
        add             r0, r0, r8              // x8
        add             r1, r1, r9              // x9
        add             r6, r6, r10             // x10
        add             r7, r7, r11             // x11
        _le32_bswap_4x  r0, r1, r6, r7, r8, r9, r10
        ldmia           r12!, {r8-r11}
        eor             r0, r0, r8              // x8
        eor             r1, r1, r9              // x9
        eor             r6, r6, r10             // x10
        eor             r7, r7, r11             // x11
        stmia           r14!, {r0,r1,r6,r7}
        ldmia           r12!, {r0,r1,r6,r7}
        __ldrd          r8, r9, sp, 48
        __ldrd          r10, r11, sp, 56
        add             r2, r8, r2, ror #drot   // x12
        add             r3, r9, r3, ror #drot   // x13
        add             r4, r10, r4, ror #drot  // x14
        add             r5, r11, r5, ror #drot  // x15
        _le32_bswap_4x  r2, r3, r4, r5, r9, r10, r11
        ldr             r9, [sp, #72]           // load LEN
        eor             r2, r2, r0              // x12
        eor             r3, r3, r1              // x13
        eor             r4, r4, r6              // x14
        eor             r5, r5, r7              // x15
        subs            r9, #64                 // decrement and check LEN
        stmia           r14!, {r2-r5}
        beq             .Ldone\@

.Lprepare_for_next_block\@:

        // Stack: x0-x15 OUT IN LEN

        // Increment block counter (x12)
        add             r8, #1

        // Store updated (OUT, IN, LEN)
        str             r14, [sp, #64]
        str             r12, [sp, #68]
        str             r9, [sp, #72]

        mov             r14, sp

        // Store updated block counter (x12)
        str             r8, [sp, #48]

        sub             sp, #16

        // Reload state and do next block
        ldmia           r14!, {r0-r11}          // load x0-x11
        __strd          r10, r11, sp, 8         // store x10-x11 before state
        ldmia           r14, {r10-r12,r14}      // load x12-x15
        b               .Lnext_block\@

.Lxor_slowpath\@:
        // Slow path: < 64 bytes remaining, or unaligned input or output buffer.
        // We handle it by storing the 64 bytes of keystream to the stack, then
        // XOR-ing the needed portion with the data.
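        // Note that allocating the 64-byte keystream buffer below shifts all
        // of the stack offsets noted above up by 64: orig_x0 moves from sp+32
        // to sp+96, and OUT/IN/LEN move from sp+96/100/104 to sp+160/164/168.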

        // Allocate keystream buffer
        sub             sp, #64
        mov             r14, sp

        // Stack: ks0-ks15 x8-x9 x12-x15 x10-x11 orig_x0-orig_x15 OUT IN LEN
        // Registers: r0-r7 are x0-x7; r8-r11 are free; r12 is IN; r14 is &ks0.
        // x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'.

        // Save keystream for x0-x3
        __ldrd          r8, r9, sp, 96
        __ldrd          r10, r11, sp, 104
        add             X0, X0, r8
        add             X1, X1, r9
        add             X2, X2, r10
        add             X3, X3, r11
        _le32_bswap_4x  X0, X1, X2, X3, r8, r9, r10
        stmia           r14!, {X0-X3}

        // Save keystream for x4-x7
        __ldrd          r8, r9, sp, 112
        __ldrd          r10, r11, sp, 120
        add             X4, r8, X4, ror #brot
        add             X5, r9, X5, ror #brot
        add             X6, r10, X6, ror #brot
        add             X7, r11, X7, ror #brot
        _le32_bswap_4x  X4, X5, X6, X7, r8, r9, r10
        add             r8, sp, #64
        stmia           r14!, {X4-X7}

        // Save keystream for x8-x15
        ldm             r8, {r0-r7}             // (x8-x9,x12-x15,x10-x11)
        __ldrd          r8, r9, sp, 128
        __ldrd          r10, r11, sp, 136
        add             r0, r0, r8              // x8
        add             r1, r1, r9              // x9
        add             r6, r6, r10             // x10
        add             r7, r7, r11             // x11
        _le32_bswap_4x  r0, r1, r6, r7, r8, r9, r10
        stmia           r14!, {r0,r1,r6,r7}
        __ldrd          r8, r9, sp, 144
        __ldrd          r10, r11, sp, 152
        add             r2, r8, r2, ror #drot   // x12
        add             r3, r9, r3, ror #drot   // x13
        add             r4, r10, r4, ror #drot  // x14
        add             r5, r11, r5, ror #drot  // x15
        _le32_bswap_4x  r2, r3, r4, r5, r9, r10, r11
        stmia           r14, {r2-r5}

        // Stack: ks0-ks15 unused0-unused7 x0-x15 OUT IN LEN
        // Registers: r8 is block counter, r12 is IN.

        ldr             r9, [sp, #168]          // LEN
        ldr             r14, [sp, #160]         // OUT
        cmp             r9, #64
        mov             r0, sp
        movle           r1, r9
        movgt           r1, #64

        // r1 is number of bytes to XOR, in range [1, 64]

.if __LINUX_ARM_ARCH__ < 6
        orr             r2, r12, r14
        tst             r2, #3                  // IN or OUT misaligned?
        bne             .Lxor_next_byte\@
.endif

        // XOR a word at a time
.rept 16
        subs            r1, #4
        blt             .Lxor_words_done\@
        ldr             r2, [r12], #4
        ldr             r3, [r0], #4
        eor             r2, r2, r3
        str             r2, [r14], #4
.endr
        b               .Lxor_slowpath_done\@
.Lxor_words_done\@:
        ands            r1, r1, #3
        beq             .Lxor_slowpath_done\@

        // XOR a byte at a time
.Lxor_next_byte\@:
        ldrb            r2, [r12], #1
        ldrb            r3, [r0], #1
        eor             r2, r2, r3
        strb            r2, [r14], #1
        subs            r1, #1
        bne             .Lxor_next_byte\@

.Lxor_slowpath_done\@:
        subs            r9, #64
        add             sp, #96
        bgt             .Lprepare_for_next_block\@

.Ldone\@:
.endm   // _chacha

/*
 * void chacha_doarm(u8 *dst, const u8 *src, unsigned int bytes,
 *                   const u32 *state, int nrounds);
 */
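/*
 * On entry, per the AAPCS: r0 = dst, r1 = src, r2 = bytes, r3 = state, and
 * nrounds is the first stack argument (read with 'ldr ip, [sp]' below).
 * A length of zero returns immediately; any final partial block is handled
 * by the slow path inside _chacha.
 */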
ENTRY(chacha_doarm)
        cmp             r2, #0                  // len == 0?
        reteq           lr

        ldr             ip, [sp]
        cmp             ip, #12

        push            {r0-r2,r4-r11,lr}

        // Push state x0-x15 onto stack.
        // Also store an extra copy of x10-x11 just before the state.

        add             X12, r3, #48
        ldm             X12, {X12,X13,X14,X15}
        push            {X12,X13,X14,X15}
        sub             sp, sp, #64

        __ldrd          X8_X10, X9_X11, r3, 40
        __strd          X8_X10, X9_X11, sp, 8
        __strd          X8_X10, X9_X11, sp, 56
        ldm             r3, {X0-X9_X11}
        __strd          X0, X1, sp, 16
        __strd          X2, X3, sp, 24
        __strd          X4, X5, sp, 32
        __strd          X6, X7, sp, 40
        __strd          X8_X10, X9_X11, sp, 48

        beq             1f
        _chacha         20

0:      add             sp, #76
        pop             {r4-r11, pc}

1:      _chacha         12
        b               0b
ENDPROC(chacha_doarm)

/*
 * void hchacha_block_arm(const u32 state[16], u32 out[8], int nrounds);
 */
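/*
 * HChaCha is the permutation-only variant used for XChaCha key derivation:
 * it runs the same _chacha_permute as above but skips the feed-forward
 * addition of the original state, then writes x0-x3 and x12-x15 (the first
 * and last rows of the permuted matrix) to 'out'. Arguments: r0 = state,
 * r1 = out, r2 = nrounds (12 or 20).
 */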
ENTRY(hchacha_block_arm)
        push            {r1,r4-r11,lr}

        cmp             r2, #12                 // ChaCha12 ?

        mov             r14, r0
        ldmia           r14!, {r0-r11}          // load x0-x11
        push            {r10-r11}               // store x10-x11 to stack
        ldm             r14, {r10-r12,r14}      // load x12-x15
        sub             sp, #8

        beq             1f
        _chacha_permute 20

        // Skip over (unused0-unused1, x10-x11)
0:      add             sp, #16

        // Fix up rotations of x12-x15
        ror             X12, X12, #drot
        ror             X13, X13, #drot
        pop             {r4}                    // load 'out'
        ror             X14, X14, #drot
        ror             X15, X15, #drot

        // Store (x0-x3,x12-x15) to 'out'
        stm             r4, {X0,X1,X2,X3,X12,X13,X14,X15}

        pop             {r4-r11,pc}

1:      _chacha_permute 12
        b               0b
ENDPROC(hchacha_block_arm)