
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * BLAKE2b digest algorithm, NEON accelerated
 *
 * Copyright 2020 Google LLC
 *
 * Author: Eric Biggers <ebiggers@google.com>
 */

#include <linux/linkage.h>

	.text
	.fpu		neon

	// The arguments to blake2b_compress_neon()
	STATE		.req	r0
	BLOCK		.req	r1
	NBLOCKS		.req	r2
	INC		.req	r3

	// Pointers to the rotation tables
	ROR24_TABLE	.req	r4
	ROR16_TABLE	.req	r5

	// The original stack pointer
	ORIG_SP		.req	r6

	// NEON registers which contain the message words of the current block.
	// M_0-M_3 are occasionally used for other purposes too.
	M_0		.req	d16
	M_1		.req	d17
	M_2		.req	d18
	M_3		.req	d19
	M_4		.req	d20
	M_5		.req	d21
	M_6		.req	d22
	M_7		.req	d23
	M_8		.req	d24
	M_9		.req	d25
	M_10		.req	d26
	M_11		.req	d27
	M_12		.req	d28
	M_13		.req	d29
	M_14		.req	d30
	M_15		.req	d31

	.align		4
	// Tables for computing ror64(x, 24) and ror64(x, 16) using the vtbl.8
	// instruction.  This is the most efficient way to implement these
	// rotation amounts with NEON.  (On Cortex-A53 it's the same speed as
	// vshr.u64 + vsli.u64, while on Cortex-A7 it's faster.)
.Lror24_table:
	.byte		3, 4, 5, 6, 7, 0, 1, 2
.Lror16_table:
	.byte		2, 3, 4, 5, 6, 7, 0, 1
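	// To see why these tables work, note that rotating a 64-bit word right
	// by a multiple of 8 bits is just a byte permutation, which is exactly
	// what vtbl.8 computes (dest byte i = src byte table[i]).  An
	// illustrative C equivalent of the ror24 table, assuming a
	// little-endian byte view of x:
	//
	//	u64 ror64_24(u64 x)
	//	{
	//		static const u8 tbl[8] = {3, 4, 5, 6, 7, 0, 1, 2};
	//		u8 b[8], r[8];
	//		int i;
	//
	//		memcpy(b, &x, 8);
	//		for (i = 0; i < 8; i++)
	//			r[i] = b[tbl[i]];
	//		memcpy(&x, r, 8);
	//		return x;	// == (x >> 24) | (x << 40)
	//	}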
	// The BLAKE2b initialization vector
.Lblake2b_IV:
	.quad		0x6a09e667f3bcc908, 0xbb67ae8584caa73b
	.quad		0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1
	.quad		0x510e527fade682d1, 0x9b05688c2b3e6c1f
	.quad		0x1f83d9abfb41bd6b, 0x5be0cd19137e2179
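	// (These are the same constants as the SHA-512 initial hash values:
	// the fractional parts of the square roots of the first eight primes.)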
// Execute one round of BLAKE2b by updating the state matrix v[0..15] in the
// NEON registers q0-q7.  The message block is in q8..q15 (M_0-M_15).  The
// stack pointer points to a 32-byte aligned buffer containing a copy of q8
// and q9 (M_0-M_3), so that they can be reloaded if they are used as
// temporary registers.  The macro arguments s0-s15 give the order in which
// the message words are used in this round.  'final' is 1 if this is the
// final round.
.macro	_blake2b_round	s0, s1, s2, s3, s4, s5, s6, s7, \
			s8, s9, s10, s11, s12, s13, s14, s15, final=0

	// Mix the columns:
	// (v[0], v[4], v[8], v[12]), (v[1], v[5], v[9], v[13]),
	// (v[2], v[6], v[10], v[14]), and (v[3], v[7], v[11], v[15]).

	// a += b + m[blake2b_sigma[r][2*i + 0]];
	vadd.u64	q0, q0, q2
	vadd.u64	q1, q1, q3
	vadd.u64	d0, d0, M_\s0
	vadd.u64	d1, d1, M_\s2
	vadd.u64	d2, d2, M_\s4
	vadd.u64	d3, d3, M_\s6

	// d = ror64(d ^ a, 32);
	veor		q6, q6, q0
	veor		q7, q7, q1
	vrev64.32	q6, q6
	vrev64.32	q7, q7

	// c += d;
	vadd.u64	q4, q4, q6
	vadd.u64	q5, q5, q7

	// b = ror64(b ^ c, 24);
	vld1.8		{M_0}, [ROR24_TABLE, :64]
	veor		q2, q2, q4
	veor		q3, q3, q5
	vtbl.8		d4, {d4}, M_0
	vtbl.8		d5, {d5}, M_0
	vtbl.8		d6, {d6}, M_0
	vtbl.8		d7, {d7}, M_0

	// a += b + m[blake2b_sigma[r][2*i + 1]];
	//
	// M_0 got clobbered above, so we have to reload it if any of the four
	// message words this step needs happens to be M_0.  Otherwise we don't
	// need to reload it here, as it will just get clobbered again below.
.if \s1 == 0 || \s3 == 0 || \s5 == 0 || \s7 == 0
	vld1.8		{M_0}, [sp, :64]
.endif
	vadd.u64	q0, q0, q2
	vadd.u64	q1, q1, q3
	vadd.u64	d0, d0, M_\s1
	vadd.u64	d1, d1, M_\s3
	vadd.u64	d2, d2, M_\s5
	vadd.u64	d3, d3, M_\s7

	// d = ror64(d ^ a, 16);
	vld1.8		{M_0}, [ROR16_TABLE, :64]
	veor		q6, q6, q0
	veor		q7, q7, q1
	vtbl.8		d12, {d12}, M_0
	vtbl.8		d13, {d13}, M_0
	vtbl.8		d14, {d14}, M_0
	vtbl.8		d15, {d15}, M_0

	// c += d;
	vadd.u64	q4, q4, q6
	vadd.u64	q5, q5, q7

	// b = ror64(b ^ c, 63);
	//
	// This rotation amount isn't a multiple of 8, so it has to be
	// implemented using a pair of shifts, which requires temporary
	// registers.  Use q8-q9 (M_0-M_3) for this, and reload them afterwards.
	veor		q8, q2, q4
	veor		q9, q3, q5
	vshr.u64	q2, q8, #63
	vshr.u64	q3, q9, #63
	vsli.u64	q2, q8, #1
	vsli.u64	q3, q9, #1
	vld1.8		{q8-q9}, [sp, :256]

	// Mix the diagonals:
	// (v[0], v[5], v[10], v[15]), (v[1], v[6], v[11], v[12]),
	// (v[2], v[7], v[8], v[13]), and (v[3], v[4], v[9], v[14]).
	//
	// There are two possible ways to do this: use 'vext' instructions to
	// shift the rows of the matrix so that the diagonals become columns,
	// and undo it afterwards; or just use 64-bit operations on 'd'
	// registers instead of 128-bit operations on 'q' registers.  We use the
	// latter approach, as it performs much better on Cortex-A7.

	// a += b + m[blake2b_sigma[r][2*i + 0]];
	vadd.u64	d0, d0, d5
	vadd.u64	d1, d1, d6
	vadd.u64	d2, d2, d7
	vadd.u64	d3, d3, d4
	vadd.u64	d0, d0, M_\s8
	vadd.u64	d1, d1, M_\s10
	vadd.u64	d2, d2, M_\s12
	vadd.u64	d3, d3, M_\s14

	// d = ror64(d ^ a, 32);
	veor		d15, d15, d0
	veor		d12, d12, d1
	veor		d13, d13, d2
	veor		d14, d14, d3
	vrev64.32	d15, d15
	vrev64.32	d12, d12
	vrev64.32	d13, d13
	vrev64.32	d14, d14

	// c += d;
	vadd.u64	d10, d10, d15
	vadd.u64	d11, d11, d12
	vadd.u64	d8, d8, d13
	vadd.u64	d9, d9, d14

	// b = ror64(b ^ c, 24);
	vld1.8		{M_0}, [ROR24_TABLE, :64]
	veor		d5, d5, d10
	veor		d6, d6, d11
	veor		d7, d7, d8
	veor		d4, d4, d9
	vtbl.8		d5, {d5}, M_0
	vtbl.8		d6, {d6}, M_0
	vtbl.8		d7, {d7}, M_0
	vtbl.8		d4, {d4}, M_0

	// a += b + m[blake2b_sigma[r][2*i + 1]];
.if \s9 == 0 || \s11 == 0 || \s13 == 0 || \s15 == 0
	vld1.8		{M_0}, [sp, :64]
.endif
	vadd.u64	d0, d0, d5
	vadd.u64	d1, d1, d6
	vadd.u64	d2, d2, d7
	vadd.u64	d3, d3, d4
	vadd.u64	d0, d0, M_\s9
	vadd.u64	d1, d1, M_\s11
	vadd.u64	d2, d2, M_\s13
	vadd.u64	d3, d3, M_\s15

	// d = ror64(d ^ a, 16);
	vld1.8		{M_0}, [ROR16_TABLE, :64]
	veor		d15, d15, d0
	veor		d12, d12, d1
	veor		d13, d13, d2
	veor		d14, d14, d3
	vtbl.8		d12, {d12}, M_0
	vtbl.8		d13, {d13}, M_0
	vtbl.8		d14, {d14}, M_0
	vtbl.8		d15, {d15}, M_0

	// c += d;
	vadd.u64	d10, d10, d15
	vadd.u64	d11, d11, d12
	vadd.u64	d8, d8, d13
	vadd.u64	d9, d9, d14

	// b = ror64(b ^ c, 63);
	veor		d16, d4, d9
	veor		d17, d5, d10
	veor		d18, d6, d11
	veor		d19, d7, d8
	vshr.u64	q2, q8, #63
	vshr.u64	q3, q9, #63
	vsli.u64	q2, q8, #1
	vsli.u64	q3, q9, #1

	// Reloading q8-q9 can be skipped on the final round.
.if ! \final
	vld1.8		{q8-q9}, [sp, :256]
.endif
.endm
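// For reference, each invocation of the macro above is the NEON equivalent
// of eight applications of the standard BLAKE2b mixing function G from
// RFC 7693 (four down the columns of 'v', then four down the diagonals),
// with all four applications in each group computed in parallel.  An
// illustrative C version of G:
//
//	static void G(u64 v[16], int a, int b, int c, int d, u64 x, u64 y)
//	{
//		v[a] += v[b] + x;
//		v[d] = ror64(v[d] ^ v[a], 32);
//		v[c] += v[d];
//		v[b] = ror64(v[b] ^ v[c], 24);
//		v[a] += v[b] + y;
//		v[d] = ror64(v[d] ^ v[a], 16);
//		v[c] += v[d];
//		v[b] = ror64(v[b] ^ v[c], 63);	// == rol64(v[b] ^ v[c], 1)
//	}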
//
// void blake2b_compress_neon(struct blake2b_state *state,
//			      const u8 *block, size_t nblocks, u32 inc);
//
// Only the first three fields of struct blake2b_state are used:
//	u64 h[8];	(inout)
//	u64 t[2];	(inout)
//	u64 f[2];	(in)
//
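// NEON may only be used in the kernel between kernel_neon_begin() and
// kernel_neon_end(), so a C caller is expected to look roughly like this
// (a sketch, not a verbatim copy of the glue code):
//
//	kernel_neon_begin();
//	blake2b_compress_neon(state, block, nblocks, inc);
//	kernel_neon_end();
//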
	.align		5
ENTRY(blake2b_compress_neon)
	push		{r4-r10}

	// Allocate a 32-byte stack buffer that is 32-byte aligned.
	mov		ORIG_SP, sp
	sub		ip, sp, #32
	bic		ip, ip, #31
	mov		sp, ip
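	// (In C terms: sp = (sp - 32) & ~31, i.e. reserve 32 bytes and round
	// the result down to a 32-byte boundary.)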
	adr		ROR24_TABLE, .Lror24_table
	adr		ROR16_TABLE, .Lror16_table

	mov		ip, STATE
	vld1.64		{q0-q1}, [ip]!		// Load h[0..3]
	vld1.64		{q2-q3}, [ip]!		// Load h[4..7]
.Lnext_block:
	adr		r10, .Lblake2b_IV
	vld1.64		{q14-q15}, [ip]		// Load t[0..1] and f[0..1]
	vld1.64		{q4-q5}, [r10]!		// Load IV[0..3]
	vmov		r7, r8, d28		// Copy t[0] to (r7, r8)
	vld1.64		{q6-q7}, [r10]		// Load IV[4..7]
	adds		r7, r7, INC		// Increment counter
	bcs		.Lslow_inc_ctr
	vmov.i32	d28[0], r7
	vst1.64		{d28}, [ip]		// Update t[0]
.Linc_ctr_done:

	// Load the next message block and finish initializing the state matrix
	// 'v'.  Fortunately, there are exactly enough NEON registers to fit the
	// entire state matrix in q0-q7 and the entire message block in q8-q15.
	//
	// However, _blake2b_round also needs some extra registers for rotates,
	// so we have to spill some registers.  It's better to spill the message
	// registers than the state registers, as the message doesn't change.
	// Therefore we store a copy of the first 32 bytes of the message block
	// (q8-q9) in an aligned buffer on the stack so that they can be
	// reloaded when needed.  (We could just reload directly from the
	// message buffer, but it's faster to use aligned loads.)
	vld1.8		{q8-q9}, [BLOCK]!
	veor		q6, q6, q14	// v[12..13] = IV[4..5] ^ t[0..1]
	vld1.8		{q10-q11}, [BLOCK]!
	veor		q7, q7, q15	// v[14..15] = IV[6..7] ^ f[0..1]
	vld1.8		{q12-q13}, [BLOCK]!
	vst1.8		{q8-q9}, [sp, :256]
	mov		ip, STATE
	vld1.8		{q14-q15}, [BLOCK]!

	// Execute the rounds.  Each round is provided the order in which it
	// needs to use the message words.
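	// (These orders are the BLAKE2b message schedule blake2b_sigma[r] from
	// RFC 7693; BLAKE2b has 12 rounds, and rounds 10 and 11 reuse
	// sigma[0] and sigma[1].)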
	_blake2b_round	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
	_blake2b_round	14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3
	_blake2b_round	11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4
	_blake2b_round	7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8
	_blake2b_round	9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13
	_blake2b_round	2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9
	_blake2b_round	12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11
	_blake2b_round	13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10
	_blake2b_round	6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5
	_blake2b_round	10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0
	_blake2b_round	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
	_blake2b_round	14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 \
			final=1

	// Fold the final state matrix into the hash chaining value:
	//
	//	for (i = 0; i < 8; i++)
	//		h[i] ^= v[i] ^ v[i + 8];
	//
	vld1.64		{q8-q9}, [ip]!		// Load old h[0..3]
	veor		q0, q0, q4		// v[0..1] ^= v[8..9]
	veor		q1, q1, q5		// v[2..3] ^= v[10..11]
	vld1.64		{q10-q11}, [ip]		// Load old h[4..7]
	veor		q2, q2, q6		// v[4..5] ^= v[12..13]
	veor		q3, q3, q7		// v[6..7] ^= v[14..15]
	veor		q0, q0, q8		// v[0..1] ^= h[0..1]
	veor		q1, q1, q9		// v[2..3] ^= h[2..3]
	mov		ip, STATE
	subs		NBLOCKS, NBLOCKS, #1	// nblocks--
	vst1.64		{q0-q1}, [ip]!		// Store new h[0..3]
	veor		q2, q2, q10		// v[4..5] ^= h[4..5]
	veor		q3, q3, q11		// v[6..7] ^= h[6..7]
	vst1.64		{q2-q3}, [ip]!		// Store new h[4..7]

	// Advance to the next block, if there is one.
	bne		.Lnext_block		// nblocks != 0?

	mov		sp, ORIG_SP
	pop		{r4-r10}
	mov		pc, lr
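// For reference, the net effect of the counter update is the following C
// (a sketch; 't' is the 128-bit message byte counter, kept as two u64s):
//
//	t[0] += inc;
//	if (t[0] < inc)		// carry out of the low 64 bits
//		t[1]++;
//
// The fast path at .Lnext_block handles only the low 32 bits of t[0] (since
// 'inc' is a u32, a carry out of them is rare); the slow path below
// propagates a carry through all 128 bits.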
.Lslow_inc_ctr:
	// Handle the case where the counter overflowed its low 32 bits, by
	// carrying the overflow bit into the full 128-bit counter.
	vmov		r9, r10, d29
	adcs		r8, r8, #0
	adcs		r9, r9, #0
	adc		r10, r10, #0
	vmov		d28, r7, r8
	vmov		d29, r9, r10
	vst1.64		{q14}, [ip]		// Update t[0] and t[1]
	b		.Linc_ctr_done
ENDPROC(blake2b_compress_neon)