/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Fast SHA-256 implementation for SPE instruction set (PPC)
 *
 * This code makes use of the SPE SIMD instruction set as defined in
 * http://cache.freescale.com/files/32bit/doc/ref_manual/SPEPIM.pdf
 * Implementation is based on optimization guide notes from
 * http://cache.freescale.com/files/32bit/doc/app_note/AN2665.pdf
 *
 * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
 */
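
/*
 * For reference (FIPS 180-4), each of the 64 SHA-256 rounds computes:
 *
 *   temp1 = h + S1(e) + ch(e,f,g) + K[i] + W[i]
 *   temp2 = S0(a) + maj(a,b,c)
 *   h = g; g = f; f = e; e = d + temp1;
 *   d = c; c = b; b = a; a = temp1 + temp2;
 *
 * with S1(e) = (e rotr 6) xor (e rotr 11) xor (e rotr 25),
 *      S0(a) = (a rotr 2) xor (a rotr 13) xor (a rotr 22),
 *      ch(e,f,g) = (e and f) xor (~e and g),
 *      maj(a,b,c) = (a and b) xor (a and c) xor (b and c).
 * The macros below follow this naming in their comments; instead of
 * shuffling values between variables they rotate the register
 * assignment from round to round.
 */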

#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>

#define rHP	r3	/* pointer to hash values in memory */
#define rKP	r24	/* pointer to round constants */
#define rWP	r4	/* pointer to input data */

#define rH0	r5	/* 8 32 bit hash values in 8 registers */
#define rH1	r6
#define rH2	r7
#define rH3	r8
#define rH4	r9
#define rH5	r10
#define rH6	r11
#define rH7	r12

#define rW0	r14	/* 64 bit registers. 16 words in 8 registers */
#define rW1	r15
#define rW2	r16
#define rW3	r17
#define rW4	r18
#define rW5	r19
#define rW6	r20
#define rW7	r21

#define rT0	r22	/* 64 bit temporaries */
#define rT1	r23
#define rT2	r0	/* 32 bit temporaries */
#define rT3	r25

#define CMP_KN_LOOP
#define CMP_KC_LOOP \
	cmpwi	rT1,0;
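
/*
 * CMP_KN_LOOP expands to nothing (keep looping), while CMP_KC_LOOP
 * compares rT1, which at that point holds the round constant pair just
 * loaded by evldw, against zero. Reading the code, the "bt gt" at the
 * end of the round loop stays in the loop while the signed low word is
 * positive (K[31] = 0x14292967, K[47] = 0x106aa070) and falls through
 * after the third pass, where K[63] = 0xc67178f2 is negative.
 */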

#define INITIALIZE \
	stwu	r1,-128(r1);	/* create stack frame */ \
	evstdw	r14,8(r1);	/* We must save non volatile */ \
	evstdw	r15,16(r1);	/* registers. Take the chance */ \
	evstdw	r16,24(r1);	/* and save the SPE part too */ \
	evstdw	r17,32(r1); \
	evstdw	r18,40(r1); \
	evstdw	r19,48(r1); \
	evstdw	r20,56(r1); \
	evstdw	r21,64(r1); \
	evstdw	r22,72(r1); \
	evstdw	r23,80(r1); \
	stw	r24,88(r1);	/* save normal registers */ \
	stw	r25,92(r1);
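
/*
 * Layout note: 0(r1) holds the back chain written by stwu, so the save
 * area starts at offset 8; the 8 byte stride presumably keeps each
 * evstdw/evldw slot doubleword aligned for the 64 bit SPE accesses.
 */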

#define FINALIZE \
	evldw	r14,8(r1);	/* restore SPE registers */ \
	evldw	r15,16(r1); \
	evldw	r16,24(r1); \
	evldw	r17,32(r1); \
	evldw	r18,40(r1); \
	evldw	r19,48(r1); \
	evldw	r20,56(r1); \
	evldw	r21,64(r1); \
	evldw	r22,72(r1); \
	evldw	r23,80(r1); \
	lwz	r24,88(r1);	/* restore normal registers */ \
	lwz	r25,92(r1); \
	xor	r0,r0,r0; \
	stw	r0,8(r1);	/* Delete sensitive data */ \
	stw	r0,16(r1);	/* that we might have pushed */ \
	stw	r0,24(r1);	/* from other context that runs */ \
	stw	r0,32(r1);	/* the same code. Assume that */ \
	stw	r0,40(r1);	/* the lower part of the GPRs */ \
	stw	r0,48(r1);	/* was already overwritten on */ \
	stw	r0,56(r1);	/* the way down to here */ \
	stw	r0,64(r1); \
	stw	r0,72(r1); \
	stw	r0,80(r1); \
	addi	r1,r1,128;	/* cleanup stack frame */

#ifdef __BIG_ENDIAN__
#define LOAD_DATA(reg, off) \
	lwz	reg,off(rWP);	/* load data */
#define NEXT_BLOCK \
	addi	rWP,rWP,64;	/* increment per block */
#else
#define LOAD_DATA(reg, off) \
	lwbrx	reg,0,rWP;	/* load data */ \
	addi	rWP,rWP,4;	/* increment per word */
#define NEXT_BLOCK	/* nothing to do */
#endif
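
/*
 * SHA-256 consumes the input as big endian words. On big endian kernels
 * a plain lwz with an immediate offset suffices and rWP advances once
 * per 64 byte block. On little endian, lwbrx byte-swaps each load; since
 * lwbrx only exists in indexed form, rWP is instead advanced word by
 * word and NEXT_BLOCK becomes a no-op.
 */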

#define R_LOAD_W(a, b, c, d, e, f, g, h, w, off) \
	LOAD_DATA(w, off)	/* 1: W */ \
	rotrwi	rT0,e,6;	/* 1: S1 = e rotr 6 */ \
	rotrwi	rT1,e,11;	/* 1: S1' = e rotr 11 */ \
	rotrwi	rT2,e,25;	/* 1: S1" = e rotr 25 */ \
	xor	rT0,rT0,rT1;	/* 1: S1 = S1 xor S1' */ \
	and	rT3,e,f;	/* 1: ch = e and f */ \
	xor	rT0,rT0,rT2;	/* 1: S1 = S1 xor S1" */ \
	andc	rT1,g,e;	/* 1: ch' = ~e and g */ \
	lwz	rT2,off(rKP);	/* 1: K */ \
	xor	rT3,rT3,rT1;	/* 1: ch = ch xor ch' */ \
	add	h,h,rT0;	/* 1: temp1 = h + S1 */ \
	add	rT3,rT3,w;	/* 1: temp1' = ch + w */ \
	rotrwi	rT0,a,2;	/* 1: S0 = a rotr 2 */ \
	add	h,h,rT3;	/* 1: temp1 = temp1 + temp1' */ \
	rotrwi	rT1,a,13;	/* 1: S0' = a rotr 13 */ \
	add	h,h,rT2;	/* 1: temp1 = temp1 + K */ \
	rotrwi	rT3,a,22;	/* 1: S0" = a rotr 22 */ \
	xor	rT0,rT0,rT1;	/* 1: S0 = S0 xor S0' */ \
	add	d,d,h;		/* 1: d = d + temp1 */ \
	xor	rT3,rT0,rT3;	/* 1: S0 = S0 xor S0" */ \
	evmergelo	w,w,w;	/* shift W */ \
	or	rT2,a,b;	/* 1: maj = a or b */ \
	and	rT1,a,b;	/* 1: maj' = a and b */ \
	and	rT2,rT2,c;	/* 1: maj = maj and c */ \
	LOAD_DATA(w, off+4)	/* 2: W */ \
	or	rT2,rT1,rT2;	/* 1: maj = maj or maj' */ \
	rotrwi	rT0,d,6;	/* 2: S1 = e rotr 6 */ \
	add	rT3,rT3,rT2;	/* 1: temp2 = S0 + maj */ \
	rotrwi	rT1,d,11;	/* 2: S1' = e rotr 11 */ \
	add	h,h,rT3;	/* 1: h = temp1 + temp2 */ \
	rotrwi	rT2,d,25;	/* 2: S1" = e rotr 25 */ \
	xor	rT0,rT0,rT1;	/* 2: S1 = S1 xor S1' */ \
	and	rT3,d,e;	/* 2: ch = e and f */ \
	xor	rT0,rT0,rT2;	/* 2: S1 = S1 xor S1" */ \
	andc	rT1,f,d;	/* 2: ch' = ~e and g */ \
	lwz	rT2,off+4(rKP);	/* 2: K */ \
	xor	rT3,rT3,rT1;	/* 2: ch = ch xor ch' */ \
	add	g,g,rT0;	/* 2: temp1 = h + S1 */ \
	add	rT3,rT3,w;	/* 2: temp1' = ch + w */ \
	rotrwi	rT0,h,2;	/* 2: S0 = a rotr 2 */ \
	add	g,g,rT3;	/* 2: temp1 = temp1 + temp1' */ \
	rotrwi	rT1,h,13;	/* 2: S0' = a rotr 13 */ \
	add	g,g,rT2;	/* 2: temp1 = temp1 + K */ \
	rotrwi	rT3,h,22;	/* 2: S0" = a rotr 22 */ \
	xor	rT0,rT0,rT1;	/* 2: S0 = S0 xor S0' */ \
	or	rT2,h,a;	/* 2: maj = a or b */ \
	xor	rT3,rT0,rT3;	/* 2: S0 = S0 xor S0" */ \
	and	rT1,h,a;	/* 2: maj' = a and b */ \
	and	rT2,rT2,b;	/* 2: maj = maj and c */ \
	add	c,c,g;		/* 2: d = d + temp1 */ \
	or	rT2,rT1,rT2;	/* 2: maj = maj or maj' */ \
	add	rT3,rT3,rT2;	/* 2: temp2 = S0 + maj */ \
	add	g,g,rT3		/* 2: h = temp1 + temp2 */
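
/*
 * R_LOAD_W interleaves two consecutive rounds: statements tagged "1:"
 * and "2:" belong to rounds i and i+1, which helps hide instruction
 * latency. evmergelo parks the first message word in the upper half of
 * w before the second is loaded, so each 64 bit rW register ends up
 * holding a word pair and rW0-rW7 hold all 16 words for the message
 * schedule (FIPS 180-4):
 *
 *   W[i] = W[i-16] + s0(W[i-15]) + W[i-7] + s1(W[i-2])
 *   s0(x) = (x rotr 7) xor (x rotr 18) xor (x >> 3)
 *   s1(x) = (x rotr 17) xor (x rotr 19) xor (x >> 10)
 *
 * R_CALC_W below computes two schedule words at a time with the 64 bit
 * SPE operations while running the same two interleaved rounds.
 */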

#define R_CALC_W(a, b, c, d, e, f, g, h, w0, w1, w4, w5, w7, k, off) \
	rotrwi	rT2,e,6;	/* 1: S1 = e rotr 6 */ \
	evmergelohi	rT0,w0,w1;	/* w[-15] */ \
	rotrwi	rT3,e,11;	/* 1: S1' = e rotr 11 */ \
	evsrwiu	rT1,rT0,3;	/* s0 = w[-15] >> 3 */ \
	xor	rT2,rT2,rT3;	/* 1: S1 = S1 xor S1' */ \
	evrlwi	rT0,rT0,25;	/* s0' = w[-15] rotr 7 */ \
	rotrwi	rT3,e,25;	/* 1: S1" = e rotr 25 */ \
	evxor	rT1,rT1,rT0;	/* s0 = s0 xor s0' */ \
	xor	rT2,rT2,rT3;	/* 1: S1 = S1 xor S1" */ \
	evrlwi	rT0,rT0,21;	/* s0' = w[-15] rotr 18 */ \
	add	h,h,rT2;	/* 1: temp1 = h + S1 */ \
	evxor	rT0,rT0,rT1;	/* s0 = s0 xor s0' */ \
	and	rT2,e,f;	/* 1: ch = e and f */ \
	evaddw	w0,w0,rT0;	/* w = w[-16] + s0 */ \
	andc	rT3,g,e;	/* 1: ch' = ~e and g */ \
	evsrwiu	rT0,w7,10;	/* s1 = w[-2] >> 10 */ \
	xor	rT2,rT2,rT3;	/* 1: ch = ch xor ch' */ \
	evrlwi	rT1,w7,15;	/* s1' = w[-2] rotr 17 */ \
	add	h,h,rT2;	/* 1: temp1 = temp1 + ch */ \
	evxor	rT0,rT0,rT1;	/* s1 = s1 xor s1' */ \
	rotrwi	rT2,a,2;	/* 1: S0 = a rotr 2 */ \
	evrlwi	rT1,w7,13;	/* s1' = w[-2] rotr 19 */ \
	rotrwi	rT3,a,13;	/* 1: S0' = a rotr 13 */ \
	evxor	rT0,rT0,rT1;	/* s1 = s1 xor s1' */ \
	xor	rT2,rT2,rT3;	/* 1: S0 = S0 xor S0' */ \
	evldw	rT1,off(rKP);	/* k */ \
	rotrwi	rT3,a,22;	/* 1: S0" = a rotr 22 */ \
	evaddw	w0,w0,rT0;	/* w = w + s1 */ \
	xor	rT2,rT2,rT3;	/* 1: S0 = S0 xor S0" */ \
	evmergelohi	rT0,w4,w5;	/* w[-7] */ \
	and	rT3,a,b;	/* 1: maj = a and b */ \
	evaddw	w0,w0,rT0;	/* w = w + w[-7] */ \
	CMP_K##k##_LOOP \
	add	rT2,rT2,rT3;	/* 1: temp2 = S0 + maj */ \
	evaddw	rT1,rT1,w0;	/* wk = w + k */ \
	xor	rT3,a,b;	/* 1: maj = a xor b */ \
	evmergehi	rT0,rT1,rT1;	/* wk1/wk2 */ \
	and	rT3,rT3,c;	/* 1: maj = maj and c */ \
	add	h,h,rT0;	/* 1: temp1 = temp1 + wk */ \
	add	rT2,rT2,rT3;	/* 1: temp2 = temp2 + maj */ \
	add	g,g,rT1;	/* 2: temp1 = temp1 + wk */ \
	add	d,d,h;		/* 1: d = d + temp1 */ \
	rotrwi	rT0,d,6;	/* 2: S1 = e rotr 6 */ \
	add	h,h,rT2;	/* 1: h = temp1 + temp2 */ \
	rotrwi	rT1,d,11;	/* 2: S1' = e rotr 11 */ \
	rotrwi	rT2,d,25;	/* 2: S1" = e rotr 25 */ \
	xor	rT0,rT0,rT1;	/* 2: S1 = S1 xor S1' */ \
	and	rT3,d,e;	/* 2: ch = e and f */ \
	xor	rT0,rT0,rT2;	/* 2: S1 = S1 xor S1" */ \
	andc	rT1,f,d;	/* 2: ch' = ~e and g */ \
	add	g,g,rT0;	/* 2: temp1 = h + S1 */ \
	xor	rT3,rT3,rT1;	/* 2: ch = ch xor ch' */ \
	rotrwi	rT0,h,2;	/* 2: S0 = a rotr 2 */ \
	add	g,g,rT3;	/* 2: temp1 = temp1 + ch */ \
	rotrwi	rT1,h,13;	/* 2: S0' = a rotr 13 */ \
	rotrwi	rT3,h,22;	/* 2: S0" = a rotr 22 */ \
	xor	rT0,rT0,rT1;	/* 2: S0 = S0 xor S0' */ \
	or	rT2,h,a;	/* 2: maj = a or b */ \
	and	rT1,h,a;	/* 2: maj' = a and b */ \
	and	rT2,rT2,b;	/* 2: maj = maj and c */ \
	xor	rT3,rT0,rT3;	/* 2: S0 = S0 xor S0" */ \
	or	rT2,rT1,rT2;	/* 2: maj = maj or maj' */ \
	add	c,c,g;		/* 2: d = d + temp1 */ \
	add	rT3,rT3,rT2;	/* 2: temp2 = S0 + maj */ \
	add	g,g,rT3		/* 2: h = temp1 + temp2 */
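
/*
 * Entry point. Judging from the register use (rHP = r3, rWP = r4,
 * mtctr r5), the C prototype is along the lines of
 *
 *   void ppc_spe_sha256_transform(u32 *hash, const u8 *data, u32 nblocks);
 *
 * processing nblocks consecutive 64 byte blocks.
 */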

_GLOBAL(ppc_spe_sha256_transform)
	INITIALIZE

	mtctr	r5
	lwz	rH0,0(rHP)
	lwz	rH1,4(rHP)
	lwz	rH2,8(rHP)
	lwz	rH3,12(rHP)
	lwz	rH4,16(rHP)
	lwz	rH5,20(rHP)
	lwz	rH6,24(rHP)
	lwz	rH7,28(rHP)

ppc_spe_sha256_main:
	lis	rKP,PPC_SPE_SHA256_K@ha
	addi	rKP,rKP,PPC_SPE_SHA256_K@l

	R_LOAD_W(rH0, rH1, rH2, rH3, rH4, rH5, rH6, rH7, rW0, 0)
	R_LOAD_W(rH6, rH7, rH0, rH1, rH2, rH3, rH4, rH5, rW1, 8)
	R_LOAD_W(rH4, rH5, rH6, rH7, rH0, rH1, rH2, rH3, rW2, 16)
	R_LOAD_W(rH2, rH3, rH4, rH5, rH6, rH7, rH0, rH1, rW3, 24)
	R_LOAD_W(rH0, rH1, rH2, rH3, rH4, rH5, rH6, rH7, rW4, 32)
	R_LOAD_W(rH6, rH7, rH0, rH1, rH2, rH3, rH4, rH5, rW5, 40)
	R_LOAD_W(rH4, rH5, rH6, rH7, rH0, rH1, rH2, rH3, rW6, 48)
	R_LOAD_W(rH2, rH3, rH4, rH5, rH6, rH7, rH0, rH1, rW7, 56)
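
	/*
	 * Rounds 0-15 are done. The loop below covers rounds 16-63 in
	 * three passes of 16 rounds (two rounds per R_CALC_W call), with
	 * rKP advancing past 16 constants per pass.
	 */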
ppc_spe_sha256_16_rounds:
	addi	rKP,rKP,64
	R_CALC_W(rH0, rH1, rH2, rH3, rH4, rH5, rH6, rH7,
		 rW0, rW1, rW4, rW5, rW7, N, 0)
	R_CALC_W(rH6, rH7, rH0, rH1, rH2, rH3, rH4, rH5,
		 rW1, rW2, rW5, rW6, rW0, N, 8)
	R_CALC_W(rH4, rH5, rH6, rH7, rH0, rH1, rH2, rH3,
		 rW2, rW3, rW6, rW7, rW1, N, 16)
	R_CALC_W(rH2, rH3, rH4, rH5, rH6, rH7, rH0, rH1,
		 rW3, rW4, rW7, rW0, rW2, N, 24)
	R_CALC_W(rH0, rH1, rH2, rH3, rH4, rH5, rH6, rH7,
		 rW4, rW5, rW0, rW1, rW3, N, 32)
	R_CALC_W(rH6, rH7, rH0, rH1, rH2, rH3, rH4, rH5,
		 rW5, rW6, rW1, rW2, rW4, N, 40)
	R_CALC_W(rH4, rH5, rH6, rH7, rH0, rH1, rH2, rH3,
		 rW6, rW7, rW2, rW3, rW5, N, 48)
	R_CALC_W(rH2, rH3, rH4, rH5, rH6, rH7, rH0, rH1,
		 rW7, rW0, rW3, rW4, rW6, C, 56)
	bt	gt,ppc_spe_sha256_16_rounds
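
	/*
	 * Davies-Meyer feedback: add the block's working variables into
	 * the previous hash values and write the result back.
	 */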
	lwz	rW0,0(rHP)
	NEXT_BLOCK
	lwz	rW1,4(rHP)
	lwz	rW2,8(rHP)
	lwz	rW3,12(rHP)
	lwz	rW4,16(rHP)
	lwz	rW5,20(rHP)
	lwz	rW6,24(rHP)
	lwz	rW7,28(rHP)

	add	rH0,rH0,rW0
	stw	rH0,0(rHP)
	add	rH1,rH1,rW1
	stw	rH1,4(rHP)
	add	rH2,rH2,rW2
	stw	rH2,8(rHP)
	add	rH3,rH3,rW3
	stw	rH3,12(rHP)
	add	rH4,rH4,rW4
	stw	rH4,16(rHP)
	add	rH5,rH5,rW5
	stw	rH5,20(rHP)
	add	rH6,rH6,rW6
	stw	rH6,24(rHP)
	add	rH7,rH7,rW7
	stw	rH7,28(rHP)

	bdnz	ppc_spe_sha256_main

	FINALIZE
	blr
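
/*
 * The 64 SHA-256 round constants from FIPS 180-4 (first 32 bits of the
 * fractional parts of the cube roots of the first 64 primes). .align 5
 * gives 32 byte alignment, keeping each constant pair doubleword
 * aligned for evldw.
 */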
.data
.align 5
PPC_SPE_SHA256_K:
	.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
	.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
	.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
	.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
	.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
	.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
	.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
	.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
	.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
	.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
	.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
	.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
	.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
	.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
	.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
	.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2