/*
 * ChaCha/XChaCha NEON helper functions
 *
 * Copyright (C) 2016-2018 Linaro, Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Originally based on:
 * ChaCha20 256-bit cipher algorithm, RFC7539, x64 SSSE3 functions
 *
 * Copyright (C) 2015 Martin Willi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cache.h>

        .text
        .align          6

/*
 * chacha_permute - permute one block
 *
 * Permute one 64-byte block where the state matrix is stored in the four NEON
 * registers v0-v3.  It performs matrix operations on four words in parallel,
 * but requires shuffling to rearrange the words after each round.
 *
 * The round count is given in w3.
 *
 * Clobbers: w3, x10, v4, v12
 */
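/*
 * For reference, a scalar C sketch of the double round that the code below
 * vectorizes (illustrative only, not part of the build; QR() is a
 * hypothetical helper, x[] is the 16-word state):
 *
 *      #define QR(a, b, c, d) do {                     \
 *              a += b; d = rol32(d ^ a, 16);           \
 *              c += d; b = rol32(b ^ c, 12);           \
 *              a += b; d = rol32(d ^ a, 8);            \
 *              c += d; b = rol32(b ^ c, 7);            \
 *      } while (0)
 *
 *      // column round
 *      QR(x[0], x[4], x[8],  x[12]);  QR(x[1], x[5], x[9],  x[13]);
 *      QR(x[2], x[6], x[10], x[14]);  QR(x[3], x[7], x[11], x[15]);
 *      // diagonal round
 *      QR(x[0], x[5], x[10], x[15]);  QR(x[1], x[6], x[11], x[12]);
 *      QR(x[2], x[7], x[8],  x[13]);  QR(x[3], x[4], x[9],  x[14]);
 *
 * Here v0-v3 hold the four rows of the state, so the four column
 * quarter-rounds run in parallel, one per 32-bit lane; the ext instructions
 * below rotate rows 1-3 so the diagonal quarter-rounds line up the same way.
 */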
SYM_FUNC_START_LOCAL(chacha_permute)

        adr_l           x10, ROT8
        ld1             {v12.4s}, [x10]

.Ldoubleround:
        // x0 += x1, x3 = rotl32(x3 ^ x0, 16)
        add             v0.4s, v0.4s, v1.4s
        eor             v3.16b, v3.16b, v0.16b
        rev32           v3.8h, v3.8h

        // x2 += x3, x1 = rotl32(x1 ^ x2, 12)
        add             v2.4s, v2.4s, v3.4s
        eor             v4.16b, v1.16b, v2.16b
        shl             v1.4s, v4.4s, #12
        sri             v1.4s, v4.4s, #20

        // x0 += x1, x3 = rotl32(x3 ^ x0, 8)
        add             v0.4s, v0.4s, v1.4s
        eor             v3.16b, v3.16b, v0.16b
        tbl             v3.16b, {v3.16b}, v12.16b

        // x2 += x3, x1 = rotl32(x1 ^ x2, 7)
        add             v2.4s, v2.4s, v3.4s
        eor             v4.16b, v1.16b, v2.16b
        shl             v1.4s, v4.4s, #7
        sri             v1.4s, v4.4s, #25

        // x1 = shuffle32(x1, MASK(0, 3, 2, 1))
        ext             v1.16b, v1.16b, v1.16b, #4
        // x2 = shuffle32(x2, MASK(1, 0, 3, 2))
        ext             v2.16b, v2.16b, v2.16b, #8
        // x3 = shuffle32(x3, MASK(2, 1, 0, 3))
        ext             v3.16b, v3.16b, v3.16b, #12

        // x0 += x1, x3 = rotl32(x3 ^ x0, 16)
        add             v0.4s, v0.4s, v1.4s
        eor             v3.16b, v3.16b, v0.16b
        rev32           v3.8h, v3.8h

        // x2 += x3, x1 = rotl32(x1 ^ x2, 12)
        add             v2.4s, v2.4s, v3.4s
        eor             v4.16b, v1.16b, v2.16b
        shl             v1.4s, v4.4s, #12
        sri             v1.4s, v4.4s, #20

        // x0 += x1, x3 = rotl32(x3 ^ x0, 8)
        add             v0.4s, v0.4s, v1.4s
        eor             v3.16b, v3.16b, v0.16b
        tbl             v3.16b, {v3.16b}, v12.16b

        // x2 += x3, x1 = rotl32(x1 ^ x2, 7)
        add             v2.4s, v2.4s, v3.4s
        eor             v4.16b, v1.16b, v2.16b
        shl             v1.4s, v4.4s, #7
        sri             v1.4s, v4.4s, #25

        // x1 = shuffle32(x1, MASK(2, 1, 0, 3))
        ext             v1.16b, v1.16b, v1.16b, #12
        // x2 = shuffle32(x2, MASK(1, 0, 3, 2))
        ext             v2.16b, v2.16b, v2.16b, #8
        // x3 = shuffle32(x3, MASK(0, 3, 2, 1))
        ext             v3.16b, v3.16b, v3.16b, #4

        subs            w3, w3, #2
        b.ne            .Ldoubleround

        ret
SYM_FUNC_END(chacha_permute)
SYM_FUNC_START(chacha_block_xor_neon)
        // x0: Input state matrix, s
        // x1: 1 data block output, o
        // x2: 1 data block input, i
        // w3: nrounds

        stp             x29, x30, [sp, #-16]!
        mov             x29, sp

        // x0..3 = s0..3
        ld1             {v0.4s-v3.4s}, [x0]
        ld1             {v8.4s-v11.4s}, [x0]

        bl              chacha_permute

        ld1             {v4.16b-v7.16b}, [x2]

        // o0 = i0 ^ (x0 + s0)
        add             v0.4s, v0.4s, v8.4s
        eor             v0.16b, v0.16b, v4.16b

        // o1 = i1 ^ (x1 + s1)
        add             v1.4s, v1.4s, v9.4s
        eor             v1.16b, v1.16b, v5.16b

        // o2 = i2 ^ (x2 + s2)
        add             v2.4s, v2.4s, v10.4s
        eor             v2.16b, v2.16b, v6.16b

        // o3 = i3 ^ (x3 + s3)
        add             v3.4s, v3.4s, v11.4s
        eor             v3.16b, v3.16b, v7.16b

        st1             {v0.16b-v3.16b}, [x1]

        ldp             x29, x30, [sp], #16
        ret
SYM_FUNC_END(chacha_block_xor_neon)
SYM_FUNC_START(hchacha_block_neon)
        // x0: Input state matrix, s
        // x1: output (8 32-bit words)
        // w2: nrounds
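        //
        // Unlike a regular block, HChaCha performs no feed-forward addition
        // of the input state: the result is simply the first and last rows
        // of the permuted state, roughly (illustrative C, out[]/x[] being
        // the 8-/16-word buffers):
        //
        //      memcpy(&out[0], &x[0],  4 * sizeof(u32));
        //      memcpy(&out[4], &x[12], 4 * sizeof(u32));
        //
        // as used when deriving the XChaCha subkey.
        //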
        stp             x29, x30, [sp, #-16]!
        mov             x29, sp

        ld1             {v0.4s-v3.4s}, [x0]

        mov             w3, w2
        bl              chacha_permute

        st1             {v0.4s}, [x1], #16
        st1             {v3.4s}, [x1]

        ldp             x29, x30, [sp], #16
        ret
SYM_FUNC_END(hchacha_block_neon)
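
// Register aliases for the 16 state words of the fifth ChaCha block that
// chacha_4block_xor_neon processes in scalar (general-purpose) registers,
// interleaved with the NEON instructions operating on the other four blocks.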
a0              .req    w12
a1              .req    w13
a2              .req    w14
a3              .req    w15
a4              .req    w16
a5              .req    w17
a6              .req    w19
a7              .req    w20
a8              .req    w21
a9              .req    w22
a10             .req    w23
a11             .req    w24
a12             .req    w25
a13             .req    w26
a14             .req    w27
a15             .req    w28

        .align          6
SYM_FUNC_START(chacha_4block_xor_neon)
        frame_push      10

        // x0: Input state matrix, s
        // x1: 4 data blocks output, o
        // x2: 4 data blocks input, i
        // w3: nrounds
        // x4: byte count

        adr_l           x10, .Lpermute
        and             x5, x4, #63
        add             x10, x10, x5
        add             x11, x10, #64

        //
        // This function encrypts four consecutive ChaCha blocks by loading
        // the state matrix into NEON registers four times. The algorithm
        // performs each operation on the corresponding word of each state
        // matrix, hence requires no word shuffling. For the final XOR step
        // we transpose the matrix by interleaving 32- and then 64-bit words,
        // which allows us to do the XOR in NEON registers.
        //
        // At the same time, a fifth block is encrypted in parallel using
        // scalar registers.
        //
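        //
        // Illustrative data layout (not part of the build): after the ld4r
        // loads below, lane j of vN holds word N of block j, so a plain C
        // model of the vector part would be roughly
        //
        //      u32 x[16][4];           /* x[N][j]: word N of block j */
        //
        //      for (int N = 0; N < 16; N++)
        //              for (int j = 0; j < 4; j++)
        //                      x[N][j] = s[N];
        //      for (int j = 0; j < 4; j++)
        //              x[12][j] += j + 1;      /* per-block counter */
        //
        // with every quarter-round step applied to all four j lanes at once,
        // while the scalar a0-a15 registers carry the same computation for a
        // fifth block that keeps the unincremented counter.
        //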
        adr_l           x9, CTRINC              // ... and ROT8
        ld1             {v30.4s-v31.4s}, [x9]

        // x0..15[0-3] = s0..3[0..3]
        add             x8, x0, #16
        ld4r            { v0.4s- v3.4s}, [x0]
        ld4r            { v4.4s- v7.4s}, [x8], #16
        ld4r            { v8.4s-v11.4s}, [x8], #16
        ld4r            {v12.4s-v15.4s}, [x8]

        mov             a0, v0.s[0]
        mov             a1, v1.s[0]
        mov             a2, v2.s[0]
        mov             a3, v3.s[0]
        mov             a4, v4.s[0]
        mov             a5, v5.s[0]
        mov             a6, v6.s[0]
        mov             a7, v7.s[0]
        mov             a8, v8.s[0]
        mov             a9, v9.s[0]
        mov             a10, v10.s[0]
        mov             a11, v11.s[0]
        mov             a12, v12.s[0]
        mov             a13, v13.s[0]
        mov             a14, v14.s[0]
        mov             a15, v15.s[0]

        // x12 += counter values 1-4
        add             v12.4s, v12.4s, v30.4s
.Ldoubleround4:
        // x0 += x4, x12 = rotl32(x12 ^ x0, 16)
        // x1 += x5, x13 = rotl32(x13 ^ x1, 16)
        // x2 += x6, x14 = rotl32(x14 ^ x2, 16)
        // x3 += x7, x15 = rotl32(x15 ^ x3, 16)
        add             v0.4s, v0.4s, v4.4s
        add             a0, a0, a4
        add             v1.4s, v1.4s, v5.4s
        add             a1, a1, a5
        add             v2.4s, v2.4s, v6.4s
        add             a2, a2, a6
        add             v3.4s, v3.4s, v7.4s
        add             a3, a3, a7

        eor             v12.16b, v12.16b, v0.16b
        eor             a12, a12, a0
        eor             v13.16b, v13.16b, v1.16b
        eor             a13, a13, a1
        eor             v14.16b, v14.16b, v2.16b
        eor             a14, a14, a2
        eor             v15.16b, v15.16b, v3.16b
        eor             a15, a15, a3

        rev32           v12.8h, v12.8h
        ror             a12, a12, #16
        rev32           v13.8h, v13.8h
        ror             a13, a13, #16
        rev32           v14.8h, v14.8h
        ror             a14, a14, #16
        rev32           v15.8h, v15.8h
        ror             a15, a15, #16

        // x8 += x12, x4 = rotl32(x4 ^ x8, 12)
        // x9 += x13, x5 = rotl32(x5 ^ x9, 12)
        // x10 += x14, x6 = rotl32(x6 ^ x10, 12)
        // x11 += x15, x7 = rotl32(x7 ^ x11, 12)
        add             v8.4s, v8.4s, v12.4s
        add             a8, a8, a12
        add             v9.4s, v9.4s, v13.4s
        add             a9, a9, a13
        add             v10.4s, v10.4s, v14.4s
        add             a10, a10, a14
        add             v11.4s, v11.4s, v15.4s
        add             a11, a11, a15

        eor             v16.16b, v4.16b, v8.16b
        eor             a4, a4, a8
        eor             v17.16b, v5.16b, v9.16b
        eor             a5, a5, a9
        eor             v18.16b, v6.16b, v10.16b
        eor             a6, a6, a10
        eor             v19.16b, v7.16b, v11.16b
        eor             a7, a7, a11

        shl             v4.4s, v16.4s, #12
        shl             v5.4s, v17.4s, #12
        shl             v6.4s, v18.4s, #12
        shl             v7.4s, v19.4s, #12

        sri             v4.4s, v16.4s, #20
        ror             a4, a4, #20
        sri             v5.4s, v17.4s, #20
        ror             a5, a5, #20
        sri             v6.4s, v18.4s, #20
        ror             a6, a6, #20
        sri             v7.4s, v19.4s, #20
        ror             a7, a7, #20

        // x0 += x4, x12 = rotl32(x12 ^ x0, 8)
        // x1 += x5, x13 = rotl32(x13 ^ x1, 8)
        // x2 += x6, x14 = rotl32(x14 ^ x2, 8)
        // x3 += x7, x15 = rotl32(x15 ^ x3, 8)
        add             v0.4s, v0.4s, v4.4s
        add             a0, a0, a4
        add             v1.4s, v1.4s, v5.4s
        add             a1, a1, a5
        add             v2.4s, v2.4s, v6.4s
        add             a2, a2, a6
        add             v3.4s, v3.4s, v7.4s
        add             a3, a3, a7

        eor             v12.16b, v12.16b, v0.16b
        eor             a12, a12, a0
        eor             v13.16b, v13.16b, v1.16b
        eor             a13, a13, a1
        eor             v14.16b, v14.16b, v2.16b
        eor             a14, a14, a2
        eor             v15.16b, v15.16b, v3.16b
        eor             a15, a15, a3

        tbl             v12.16b, {v12.16b}, v31.16b
        ror             a12, a12, #24
        tbl             v13.16b, {v13.16b}, v31.16b
        ror             a13, a13, #24
        tbl             v14.16b, {v14.16b}, v31.16b
        ror             a14, a14, #24
        tbl             v15.16b, {v15.16b}, v31.16b
        ror             a15, a15, #24

        // x8 += x12, x4 = rotl32(x4 ^ x8, 7)
        // x9 += x13, x5 = rotl32(x5 ^ x9, 7)
        // x10 += x14, x6 = rotl32(x6 ^ x10, 7)
        // x11 += x15, x7 = rotl32(x7 ^ x11, 7)
        add             v8.4s, v8.4s, v12.4s
        add             a8, a8, a12
        add             v9.4s, v9.4s, v13.4s
        add             a9, a9, a13
        add             v10.4s, v10.4s, v14.4s
        add             a10, a10, a14
        add             v11.4s, v11.4s, v15.4s
        add             a11, a11, a15

        eor             v16.16b, v4.16b, v8.16b
        eor             a4, a4, a8
        eor             v17.16b, v5.16b, v9.16b
        eor             a5, a5, a9
        eor             v18.16b, v6.16b, v10.16b
        eor             a6, a6, a10
        eor             v19.16b, v7.16b, v11.16b
        eor             a7, a7, a11

        shl             v4.4s, v16.4s, #7
        shl             v5.4s, v17.4s, #7
        shl             v6.4s, v18.4s, #7
        shl             v7.4s, v19.4s, #7

        sri             v4.4s, v16.4s, #25
        ror             a4, a4, #25
        sri             v5.4s, v17.4s, #25
        ror             a5, a5, #25
        sri             v6.4s, v18.4s, #25
        ror             a6, a6, #25
        sri             v7.4s, v19.4s, #25
        ror             a7, a7, #25
        // x0 += x5, x15 = rotl32(x15 ^ x0, 16)
        // x1 += x6, x12 = rotl32(x12 ^ x1, 16)
        // x2 += x7, x13 = rotl32(x13 ^ x2, 16)
        // x3 += x4, x14 = rotl32(x14 ^ x3, 16)
        add             v0.4s, v0.4s, v5.4s
        add             a0, a0, a5
        add             v1.4s, v1.4s, v6.4s
        add             a1, a1, a6
        add             v2.4s, v2.4s, v7.4s
        add             a2, a2, a7
        add             v3.4s, v3.4s, v4.4s
        add             a3, a3, a4

        eor             v15.16b, v15.16b, v0.16b
        eor             a15, a15, a0
        eor             v12.16b, v12.16b, v1.16b
        eor             a12, a12, a1
        eor             v13.16b, v13.16b, v2.16b
        eor             a13, a13, a2
        eor             v14.16b, v14.16b, v3.16b
        eor             a14, a14, a3

        rev32           v15.8h, v15.8h
        ror             a15, a15, #16
        rev32           v12.8h, v12.8h
        ror             a12, a12, #16
        rev32           v13.8h, v13.8h
        ror             a13, a13, #16
        rev32           v14.8h, v14.8h
        ror             a14, a14, #16

        // x10 += x15, x5 = rotl32(x5 ^ x10, 12)
        // x11 += x12, x6 = rotl32(x6 ^ x11, 12)
        // x8 += x13, x7 = rotl32(x7 ^ x8, 12)
        // x9 += x14, x4 = rotl32(x4 ^ x9, 12)
        add             v10.4s, v10.4s, v15.4s
        add             a10, a10, a15
        add             v11.4s, v11.4s, v12.4s
        add             a11, a11, a12
        add             v8.4s, v8.4s, v13.4s
        add             a8, a8, a13
        add             v9.4s, v9.4s, v14.4s
        add             a9, a9, a14

        eor             v16.16b, v5.16b, v10.16b
        eor             a5, a5, a10
        eor             v17.16b, v6.16b, v11.16b
        eor             a6, a6, a11
        eor             v18.16b, v7.16b, v8.16b
        eor             a7, a7, a8
        eor             v19.16b, v4.16b, v9.16b
        eor             a4, a4, a9

        shl             v5.4s, v16.4s, #12
        shl             v6.4s, v17.4s, #12
        shl             v7.4s, v18.4s, #12
        shl             v4.4s, v19.4s, #12

        sri             v5.4s, v16.4s, #20
        ror             a5, a5, #20
        sri             v6.4s, v17.4s, #20
        ror             a6, a6, #20
        sri             v7.4s, v18.4s, #20
        ror             a7, a7, #20
        sri             v4.4s, v19.4s, #20
        ror             a4, a4, #20

        // x0 += x5, x15 = rotl32(x15 ^ x0, 8)
        // x1 += x6, x12 = rotl32(x12 ^ x1, 8)
        // x2 += x7, x13 = rotl32(x13 ^ x2, 8)
        // x3 += x4, x14 = rotl32(x14 ^ x3, 8)
        add             v0.4s, v0.4s, v5.4s
        add             a0, a0, a5
        add             v1.4s, v1.4s, v6.4s
        add             a1, a1, a6
        add             v2.4s, v2.4s, v7.4s
        add             a2, a2, a7
        add             v3.4s, v3.4s, v4.4s
        add             a3, a3, a4

        eor             v15.16b, v15.16b, v0.16b
        eor             a15, a15, a0
        eor             v12.16b, v12.16b, v1.16b
        eor             a12, a12, a1
        eor             v13.16b, v13.16b, v2.16b
        eor             a13, a13, a2
        eor             v14.16b, v14.16b, v3.16b
        eor             a14, a14, a3

        tbl             v15.16b, {v15.16b}, v31.16b
        ror             a15, a15, #24
        tbl             v12.16b, {v12.16b}, v31.16b
        ror             a12, a12, #24
        tbl             v13.16b, {v13.16b}, v31.16b
        ror             a13, a13, #24
        tbl             v14.16b, {v14.16b}, v31.16b
        ror             a14, a14, #24

        // x10 += x15, x5 = rotl32(x5 ^ x10, 7)
        // x11 += x12, x6 = rotl32(x6 ^ x11, 7)
        // x8 += x13, x7 = rotl32(x7 ^ x8, 7)
        // x9 += x14, x4 = rotl32(x4 ^ x9, 7)
        add             v10.4s, v10.4s, v15.4s
        add             a10, a10, a15
        add             v11.4s, v11.4s, v12.4s
        add             a11, a11, a12
        add             v8.4s, v8.4s, v13.4s
        add             a8, a8, a13
        add             v9.4s, v9.4s, v14.4s
        add             a9, a9, a14

        eor             v16.16b, v5.16b, v10.16b
        eor             a5, a5, a10
        eor             v17.16b, v6.16b, v11.16b
        eor             a6, a6, a11
        eor             v18.16b, v7.16b, v8.16b
        eor             a7, a7, a8
        eor             v19.16b, v4.16b, v9.16b
        eor             a4, a4, a9

        shl             v5.4s, v16.4s, #7
        shl             v6.4s, v17.4s, #7
        shl             v7.4s, v18.4s, #7
        shl             v4.4s, v19.4s, #7

        sri             v5.4s, v16.4s, #25
        ror             a5, a5, #25
        sri             v6.4s, v17.4s, #25
        ror             a6, a6, #25
        sri             v7.4s, v18.4s, #25
        ror             a7, a7, #25
        sri             v4.4s, v19.4s, #25
        ror             a4, a4, #25

        subs            w3, w3, #2
        b.ne            .Ldoubleround4
        ld4r            {v16.4s-v19.4s}, [x0], #16
        ld4r            {v20.4s-v23.4s}, [x0], #16

        // x12 += counter values 0-3
        add             v12.4s, v12.4s, v30.4s

        // x0[0-3] += s0[0]
        // x1[0-3] += s0[1]
        // x2[0-3] += s0[2]
        // x3[0-3] += s0[3]
        add             v0.4s, v0.4s, v16.4s
        mov             w6, v16.s[0]
        mov             w7, v17.s[0]
        add             v1.4s, v1.4s, v17.4s
        mov             w8, v18.s[0]
        mov             w9, v19.s[0]
        add             v2.4s, v2.4s, v18.4s
        add             a0, a0, w6
        add             a1, a1, w7
        add             v3.4s, v3.4s, v19.4s
        add             a2, a2, w8
        add             a3, a3, w9
CPU_BE(  rev            a0, a0          )
CPU_BE(  rev            a1, a1          )
CPU_BE(  rev            a2, a2          )
CPU_BE(  rev            a3, a3          )

        ld4r            {v24.4s-v27.4s}, [x0], #16
        ld4r            {v28.4s-v31.4s}, [x0]

        // x4[0-3] += s1[0]
        // x5[0-3] += s1[1]
        // x6[0-3] += s1[2]
        // x7[0-3] += s1[3]
        add             v4.4s, v4.4s, v20.4s
        mov             w6, v20.s[0]
        mov             w7, v21.s[0]
        add             v5.4s, v5.4s, v21.4s
        mov             w8, v22.s[0]
        mov             w9, v23.s[0]
        add             v6.4s, v6.4s, v22.4s
        add             a4, a4, w6
        add             a5, a5, w7
        add             v7.4s, v7.4s, v23.4s
        add             a6, a6, w8
        add             a7, a7, w9
CPU_BE(  rev            a4, a4          )
CPU_BE(  rev            a5, a5          )
CPU_BE(  rev            a6, a6          )
CPU_BE(  rev            a7, a7          )

        // x8[0-3] += s2[0]
        // x9[0-3] += s2[1]
        // x10[0-3] += s2[2]
        // x11[0-3] += s2[3]
        add             v8.4s, v8.4s, v24.4s
        mov             w6, v24.s[0]
        mov             w7, v25.s[0]
        add             v9.4s, v9.4s, v25.4s
        mov             w8, v26.s[0]
        mov             w9, v27.s[0]
        add             v10.4s, v10.4s, v26.4s
        add             a8, a8, w6
        add             a9, a9, w7
        add             v11.4s, v11.4s, v27.4s
        add             a10, a10, w8
        add             a11, a11, w9
CPU_BE(  rev            a8, a8          )
CPU_BE(  rev            a9, a9          )
CPU_BE(  rev            a10, a10        )
CPU_BE(  rev            a11, a11        )

        // x12[0-3] += s3[0]
        // x13[0-3] += s3[1]
        // x14[0-3] += s3[2]
        // x15[0-3] += s3[3]
        add             v12.4s, v12.4s, v28.4s
        mov             w6, v28.s[0]
        mov             w7, v29.s[0]
        add             v13.4s, v13.4s, v29.4s
        mov             w8, v30.s[0]
        mov             w9, v31.s[0]
        add             v14.4s, v14.4s, v30.4s
        add             a12, a12, w6
        add             a13, a13, w7
        add             v15.4s, v15.4s, v31.4s
        add             a14, a14, w8
        add             a15, a15, w9
CPU_BE(  rev            a12, a12        )
CPU_BE(  rev            a13, a13        )
CPU_BE(  rev            a14, a14        )
CPU_BE(  rev            a15, a15        )
        // interleave 32-bit words in state n, n+1
        ldp             w6, w7, [x2], #64
        zip1            v16.4s, v0.4s, v1.4s
        ldp             w8, w9, [x2, #-56]
        eor             a0, a0, w6
        zip2            v17.4s, v0.4s, v1.4s
        eor             a1, a1, w7
        zip1            v18.4s, v2.4s, v3.4s
        eor             a2, a2, w8
        zip2            v19.4s, v2.4s, v3.4s
        eor             a3, a3, w9
        ldp             w6, w7, [x2, #-48]
        zip1            v20.4s, v4.4s, v5.4s
        ldp             w8, w9, [x2, #-40]
        eor             a4, a4, w6
        zip2            v21.4s, v4.4s, v5.4s
        eor             a5, a5, w7
        zip1            v22.4s, v6.4s, v7.4s
        eor             a6, a6, w8
        zip2            v23.4s, v6.4s, v7.4s
        eor             a7, a7, w9
        ldp             w6, w7, [x2, #-32]
        zip1            v24.4s, v8.4s, v9.4s
        ldp             w8, w9, [x2, #-24]
        eor             a8, a8, w6
        zip2            v25.4s, v8.4s, v9.4s
        eor             a9, a9, w7
        zip1            v26.4s, v10.4s, v11.4s
        eor             a10, a10, w8
        zip2            v27.4s, v10.4s, v11.4s
        eor             a11, a11, w9
        ldp             w6, w7, [x2, #-16]
        zip1            v28.4s, v12.4s, v13.4s
        ldp             w8, w9, [x2, #-8]
        eor             a12, a12, w6
        zip2            v29.4s, v12.4s, v13.4s
        eor             a13, a13, w7
        zip1            v30.4s, v14.4s, v15.4s
        eor             a14, a14, w8
        zip2            v31.4s, v14.4s, v15.4s
        eor             a15, a15, w9

        mov             x3, #64
        subs            x5, x4, #128
        add             x6, x5, x2
        csel            x3, x3, xzr, ge
        csel            x2, x2, x6, ge
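        // From here on, x3 is the post-increment (64 or 0) applied to the
        // next 64-byte input load, and x5/x6/x7/x8 hold the (possibly
        // negative) byte counts remaining after 128/192/256/320 bytes.  When
        // fewer bytes than that remain, the csel/ccmp sequences stop
        // advancing x2 and redirect it so the next full-block load reads the
        // last 64 bytes of the input instead of running past the end of the
        // buffer; the partial tails are then fixed up at labels 0:-3: below.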
        // interleave 64-bit words in state n, n+2
        zip1            v0.2d, v16.2d, v18.2d
        zip2            v4.2d, v16.2d, v18.2d
        stp             a0, a1, [x1], #64
        zip1            v8.2d, v17.2d, v19.2d
        zip2            v12.2d, v17.2d, v19.2d
        stp             a2, a3, [x1, #-56]
        ld1             {v16.16b-v19.16b}, [x2], x3

        subs            x6, x4, #192
        ccmp            x3, xzr, #4, lt
        add             x7, x6, x2
        csel            x3, x3, xzr, eq
        csel            x2, x2, x7, eq

        zip1            v1.2d, v20.2d, v22.2d
        zip2            v5.2d, v20.2d, v22.2d
        stp             a4, a5, [x1, #-48]
        zip1            v9.2d, v21.2d, v23.2d
        zip2            v13.2d, v21.2d, v23.2d
        stp             a6, a7, [x1, #-40]
        ld1             {v20.16b-v23.16b}, [x2], x3

        subs            x7, x4, #256
        ccmp            x3, xzr, #4, lt
        add             x8, x7, x2
        csel            x3, x3, xzr, eq
        csel            x2, x2, x8, eq

        zip1            v2.2d, v24.2d, v26.2d
        zip2            v6.2d, v24.2d, v26.2d
        stp             a8, a9, [x1, #-32]
        zip1            v10.2d, v25.2d, v27.2d
        zip2            v14.2d, v25.2d, v27.2d
        stp             a10, a11, [x1, #-24]
        ld1             {v24.16b-v27.16b}, [x2], x3

        subs            x8, x4, #320
        ccmp            x3, xzr, #4, lt
        add             x9, x8, x2
        csel            x2, x2, x9, eq

        zip1            v3.2d, v28.2d, v30.2d
        zip2            v7.2d, v28.2d, v30.2d
        stp             a12, a13, [x1, #-16]
        zip1            v11.2d, v29.2d, v31.2d
        zip2            v15.2d, v29.2d, v31.2d
        stp             a14, a15, [x1, #-8]
        ld1             {v28.16b-v31.16b}, [x2]

        // xor with corresponding input, write to output
        tbnz            x5, #63, 0f
        eor             v16.16b, v16.16b, v0.16b
        eor             v17.16b, v17.16b, v1.16b
        eor             v18.16b, v18.16b, v2.16b
        eor             v19.16b, v19.16b, v3.16b
        st1             {v16.16b-v19.16b}, [x1], #64
        cbz             x5, .Lout

        tbnz            x6, #63, 1f
        eor             v20.16b, v20.16b, v4.16b
        eor             v21.16b, v21.16b, v5.16b
        eor             v22.16b, v22.16b, v6.16b
        eor             v23.16b, v23.16b, v7.16b
        st1             {v20.16b-v23.16b}, [x1], #64
        cbz             x6, .Lout

        tbnz            x7, #63, 2f
        eor             v24.16b, v24.16b, v8.16b
        eor             v25.16b, v25.16b, v9.16b
        eor             v26.16b, v26.16b, v10.16b
        eor             v27.16b, v27.16b, v11.16b
        st1             {v24.16b-v27.16b}, [x1], #64
        cbz             x7, .Lout

        tbnz            x8, #63, 3f
        eor             v28.16b, v28.16b, v12.16b
        eor             v29.16b, v29.16b, v13.16b
        eor             v30.16b, v30.16b, v14.16b
        eor             v31.16b, v31.16b, v15.16b
        st1             {v28.16b-v31.16b}, [x1]

.Lout:  frame_pop
        ret
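
        //
        // Partial-block tails: each label below handles the case where the
        // data ends somewhere inside the 64-byte block it refers to.  They
        // load sliding index vectors from the .Lpermute table (via x10/x11,
        // set up at function entry from 'len & 63') and use tbl/tbx to shift
        // the keystream and pull in bytes of the preceding output block, so
        // that a single 64-byte store covering the last 64 bytes of the
        // output leaves the earlier output intact.
        //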
        // fewer than 128 bytes of in/output
0:      ld1             {v8.16b}, [x10]
        ld1             {v9.16b}, [x11]
        movi            v10.16b, #16
        sub             x2, x1, #64
        add             x1, x1, x5
        ld1             {v16.16b-v19.16b}, [x2]
        tbl             v4.16b, {v0.16b-v3.16b}, v8.16b
        tbx             v20.16b, {v16.16b-v19.16b}, v9.16b
        add             v8.16b, v8.16b, v10.16b
        add             v9.16b, v9.16b, v10.16b
        tbl             v5.16b, {v0.16b-v3.16b}, v8.16b
        tbx             v21.16b, {v16.16b-v19.16b}, v9.16b
        add             v8.16b, v8.16b, v10.16b
        add             v9.16b, v9.16b, v10.16b
        tbl             v6.16b, {v0.16b-v3.16b}, v8.16b
        tbx             v22.16b, {v16.16b-v19.16b}, v9.16b
        add             v8.16b, v8.16b, v10.16b
        add             v9.16b, v9.16b, v10.16b
        tbl             v7.16b, {v0.16b-v3.16b}, v8.16b
        tbx             v23.16b, {v16.16b-v19.16b}, v9.16b

        eor             v20.16b, v20.16b, v4.16b
        eor             v21.16b, v21.16b, v5.16b
        eor             v22.16b, v22.16b, v6.16b
        eor             v23.16b, v23.16b, v7.16b
        st1             {v20.16b-v23.16b}, [x1]
        b               .Lout
        // fewer than 192 bytes of in/output
1:      ld1             {v8.16b}, [x10]
        ld1             {v9.16b}, [x11]
        movi            v10.16b, #16
        add             x1, x1, x6
        tbl             v0.16b, {v4.16b-v7.16b}, v8.16b
        tbx             v20.16b, {v16.16b-v19.16b}, v9.16b
        add             v8.16b, v8.16b, v10.16b
        add             v9.16b, v9.16b, v10.16b
        tbl             v1.16b, {v4.16b-v7.16b}, v8.16b
        tbx             v21.16b, {v16.16b-v19.16b}, v9.16b
        add             v8.16b, v8.16b, v10.16b
        add             v9.16b, v9.16b, v10.16b
        tbl             v2.16b, {v4.16b-v7.16b}, v8.16b
        tbx             v22.16b, {v16.16b-v19.16b}, v9.16b
        add             v8.16b, v8.16b, v10.16b
        add             v9.16b, v9.16b, v10.16b
        tbl             v3.16b, {v4.16b-v7.16b}, v8.16b
        tbx             v23.16b, {v16.16b-v19.16b}, v9.16b

        eor             v20.16b, v20.16b, v0.16b
        eor             v21.16b, v21.16b, v1.16b
        eor             v22.16b, v22.16b, v2.16b
        eor             v23.16b, v23.16b, v3.16b
        st1             {v20.16b-v23.16b}, [x1]
        b               .Lout
        // fewer than 256 bytes of in/output
2:      ld1             {v4.16b}, [x10]
        ld1             {v5.16b}, [x11]
        movi            v6.16b, #16
        add             x1, x1, x7
        tbl             v0.16b, {v8.16b-v11.16b}, v4.16b
        tbx             v24.16b, {v20.16b-v23.16b}, v5.16b
        add             v4.16b, v4.16b, v6.16b
        add             v5.16b, v5.16b, v6.16b
        tbl             v1.16b, {v8.16b-v11.16b}, v4.16b
        tbx             v25.16b, {v20.16b-v23.16b}, v5.16b
        add             v4.16b, v4.16b, v6.16b
        add             v5.16b, v5.16b, v6.16b
        tbl             v2.16b, {v8.16b-v11.16b}, v4.16b
        tbx             v26.16b, {v20.16b-v23.16b}, v5.16b
        add             v4.16b, v4.16b, v6.16b
        add             v5.16b, v5.16b, v6.16b
        tbl             v3.16b, {v8.16b-v11.16b}, v4.16b
        tbx             v27.16b, {v20.16b-v23.16b}, v5.16b

        eor             v24.16b, v24.16b, v0.16b
        eor             v25.16b, v25.16b, v1.16b
        eor             v26.16b, v26.16b, v2.16b
        eor             v27.16b, v27.16b, v3.16b
        st1             {v24.16b-v27.16b}, [x1]
        b               .Lout
        // fewer than 320 bytes of in/output
3:      ld1             {v4.16b}, [x10]
        ld1             {v5.16b}, [x11]
        movi            v6.16b, #16
        add             x1, x1, x8
        tbl             v0.16b, {v12.16b-v15.16b}, v4.16b
        tbx             v28.16b, {v24.16b-v27.16b}, v5.16b
        add             v4.16b, v4.16b, v6.16b
        add             v5.16b, v5.16b, v6.16b
        tbl             v1.16b, {v12.16b-v15.16b}, v4.16b
        tbx             v29.16b, {v24.16b-v27.16b}, v5.16b
        add             v4.16b, v4.16b, v6.16b
        add             v5.16b, v5.16b, v6.16b
        tbl             v2.16b, {v12.16b-v15.16b}, v4.16b
        tbx             v30.16b, {v24.16b-v27.16b}, v5.16b
        add             v4.16b, v4.16b, v6.16b
        add             v5.16b, v5.16b, v6.16b
        tbl             v3.16b, {v12.16b-v15.16b}, v4.16b
        tbx             v31.16b, {v24.16b-v27.16b}, v5.16b

        eor             v28.16b, v28.16b, v0.16b
        eor             v29.16b, v29.16b, v1.16b
        eor             v30.16b, v30.16b, v2.16b
        eor             v31.16b, v31.16b, v3.16b
        st1             {v28.16b-v31.16b}, [x1]
        b               .Lout
SYM_FUNC_END(chacha_4block_xor_neon)
        .section        ".rodata", "a", %progbits
        .align          L1_CACHE_SHIFT
.Lpermute:
        .set            .Li, 0
        .rept           192
        .byte           (.Li - 64)
        .set            .Li, .Li + 1
        .endr
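
// Illustrative only: the .Lpermute table above is equivalent to the C array
// (hypothetical sketch, not part of the build)
//
//      signed char permute[192];
//      for (int i = 0; i < 192; i++)
//              permute[i] = i - 64;
//
// Indexing it at offset (len & 63) yields the sliding tbl/tbx index vectors
// used by the partial-block tails: byte values below 0 or above 63 fall
// outside the 64-byte table operand, so tbl produces zero for those
// positions and tbx leaves the destination byte untouched.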
CTRINC: .word           1, 2, 3, 4
ROT8:   .word           0x02010003, 0x06050407, 0x0a09080b, 0x0e0d0c0f
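
// CTRINC holds the counter increments 1-4 added to state word 12 for the
// four NEON blocks (the scalar fifth block keeps the unincremented counter).
// ROT8 is a tbl byte-shuffle mask implementing a 32-bit rotate left by 8 on
// each word of a vector, i.e. roughly x = (x << 8) | (x >> 24) per 32-bit
// lane (illustrative, assuming little-endian lane order).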