#define __ARM_ARCH__ __LINUX_ARM_ARCH__

@ SPDX-License-Identifier: GPL-2.0
@ This code is taken from the OpenSSL project but the author (Andy Polyakov)
@ has relicensed it under the GPLv2. Therefore this program is free software;
@ you can redistribute it and/or modify it under the terms of the GNU General
@ Public License version 2 as published by the Free Software Foundation.
@
@ The original headers, including the original license headers, are
@ included below for completeness.

@ ====================================================================
@ Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
@ project. The module is, however, dual licensed under OpenSSL and
@ CRYPTOGAMS licenses depending on where you obtain it. For further
@ details see https://www.openssl.org/~appro/cryptogams/.
@ ====================================================================

@ sha1_block procedure for ARMv4.
@
@ January 2007.
@
@ Size/performance trade-off
@ ====================================================================
@ impl           size in bytes   comp cycles[*]   measured performance
@ ====================================================================
@ thumb          304             3212             4420
@ armv4-small    392/+29%        1958/+64%        2250/+96%
@ armv4-compact  740/+89%        1552/+26%        1840/+22%
@ armv4-large    1420/+92%       1307/+19%        1370/+34%[***]
@ full unroll    ~5100/+260%     ~1260/+4%        ~1300/+5%
@ ====================================================================
@ thumb          = same as 'small' but in Thumb instructions[**] and
@                  with recurring code in two private functions;
@ small          = detached Xload/update, loops are folded;
@ compact        = detached Xload/update, 5x unroll;
@ large          = interleaved Xload/update, 5x unroll;
@ full unroll    = interleaved Xload/update, full unroll, estimated[!];
@
@ [*]   Manually counted instructions in "grand" loop body. Measured
@       performance is affected by prologue and epilogue overhead,
@       i-cache availability, branch penalties, etc.
@ [**]  While each Thumb instruction is half the size, the Thumb set is
@       not as expressive as ARM: e.g., there are only two arithmetic
@       instructions with 3 arguments, no [fixed] rotate, and addressing
@       modes are limited. As a result it takes more instructions to do
@       the same job in Thumb, so the code is never half the size and is
@       always slower.
@ [***] which is also ~35% better than compiler-generated code. A dual-
@       issue Cortex A8 core was measured to process an input block in
@       ~990 cycles.
@
@ August 2010.
@
@ Rescheduling for the dual-issue pipeline resulted in a 13% improvement
@ on a Cortex A8 core, and in absolute terms ~870 cycles per input block
@ [or 13.6 cycles per byte].
@
@ February 2011.
@
@ Profiler-assisted and platform-specific optimization resulted in a 10%
@ improvement on a Cortex A8 core and 12.2 cycles per byte.
#include <linux/linkage.h>

.text
.align	2

ENTRY(sha1_block_data_order)
	stmdb	sp!,{r4-r12,lr}
	add	r2,r1,r2,lsl#6	@ r2 to point at the end of r1
	ldmia	r0,{r3,r4,r5,r6,r7}
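@ Register use (as set up above and used throughout): r0 = pointer to the
@ five-word SHA-1 state {A,B,C,D,E}, which is loaded into r3-r7; r1 = input
@ pointer, r2 = end of input (start + 64*blocks); r8 holds the round
@ constant K for the current phase; r9-r12 are scratch; r14 is the write
@ pointer for the on-stack X[] message-schedule window.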
.Lloop:
	ldr	r8,.LK_00_19
	mov	r14,sp
	sub	sp,sp,#15*4
	mov	r5,r5,ror#30
	mov	r6,r6,ror#30
	mov	r7,r7,ror#30	@ [6]
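@ For orientation: every SHA-1 round computes
@	E += ROL(A,5) + F(B,C,D) + X[i] + K;  B = ROL(B,30)
@ with the roles of A..E rotating from round to round, which is why the
@ loop bodies below are unrolled five ways. ROR(A,27) is the same as
@ ROL(A,5), and several of the working values are carried in a rotated
@ form: the ror#30 fixups above and the trailing ",ror#2" operands inside
@ the rounds (and in the final additions at .L_done) account for the
@ ROL(B,30) that the reference formula applies each round. In .L_00_15
@ each X[i] comes straight from the message: on pre-ARMv7 parts a
@ big-endian word is assembled from four ldrb loads, while ARMv7+ uses a
@ possibly-unaligned ldr followed by rev on little-endian kernels.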
.L_00_15:
#if __ARM_ARCH__<7
	ldrb	r10,[r1,#2]
	ldrb	r9,[r1,#3]
	ldrb	r11,[r1,#1]
	add	r7,r8,r7,ror#2	@ E+=K_00_19
	ldrb	r12,[r1],#4
	orr	r9,r9,r10,lsl#8
	eor	r10,r5,r6	@ F_xx_xx
	orr	r9,r9,r11,lsl#16
	add	r7,r7,r3,ror#27	@ E+=ROR(A,27)
	orr	r9,r9,r12,lsl#24
#else
	ldr	r9,[r1],#4	@ handles unaligned
	add	r7,r8,r7,ror#2	@ E+=K_00_19
	eor	r10,r5,r6	@ F_xx_xx
	add	r7,r7,r3,ror#27	@ E+=ROR(A,27)
#ifdef __ARMEL__
	rev	r9,r9	@ byte swap
#endif
#endif
	and	r10,r4,r10,ror#2
	add	r7,r7,r9	@ E+=X[i]
	eor	r10,r10,r6,ror#2	@ F_00_19(B,C,D)
	str	r9,[r14,#-4]!
	add	r7,r7,r10	@ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
	ldrb	r10,[r1,#2]
	ldrb	r9,[r1,#3]
	ldrb	r11,[r1,#1]
	add	r6,r8,r6,ror#2	@ E+=K_00_19
	ldrb	r12,[r1],#4
	orr	r9,r9,r10,lsl#8
	eor	r10,r4,r5	@ F_xx_xx
	orr	r9,r9,r11,lsl#16
	add	r6,r6,r7,ror#27	@ E+=ROR(A,27)
	orr	r9,r9,r12,lsl#24
#else
	ldr	r9,[r1],#4	@ handles unaligned
	add	r6,r8,r6,ror#2	@ E+=K_00_19
	eor	r10,r4,r5	@ F_xx_xx
	add	r6,r6,r7,ror#27	@ E+=ROR(A,27)
#ifdef __ARMEL__
	rev	r9,r9	@ byte swap
#endif
#endif
	and	r10,r3,r10,ror#2
	add	r6,r6,r9	@ E+=X[i]
	eor	r10,r10,r5,ror#2	@ F_00_19(B,C,D)
	str	r9,[r14,#-4]!
	add	r6,r6,r10	@ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
	ldrb	r10,[r1,#2]
	ldrb	r9,[r1,#3]
	ldrb	r11,[r1,#1]
	add	r5,r8,r5,ror#2	@ E+=K_00_19
	ldrb	r12,[r1],#4
	orr	r9,r9,r10,lsl#8
	eor	r10,r3,r4	@ F_xx_xx
	orr	r9,r9,r11,lsl#16
	add	r5,r5,r6,ror#27	@ E+=ROR(A,27)
	orr	r9,r9,r12,lsl#24
#else
	ldr	r9,[r1],#4	@ handles unaligned
	add	r5,r8,r5,ror#2	@ E+=K_00_19
	eor	r10,r3,r4	@ F_xx_xx
	add	r5,r5,r6,ror#27	@ E+=ROR(A,27)
#ifdef __ARMEL__
	rev	r9,r9	@ byte swap
#endif
#endif
	and	r10,r7,r10,ror#2
	add	r5,r5,r9	@ E+=X[i]
	eor	r10,r10,r4,ror#2	@ F_00_19(B,C,D)
	str	r9,[r14,#-4]!
	add	r5,r5,r10	@ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
	ldrb	r10,[r1,#2]
	ldrb	r9,[r1,#3]
	ldrb	r11,[r1,#1]
	add	r4,r8,r4,ror#2	@ E+=K_00_19
	ldrb	r12,[r1],#4
	orr	r9,r9,r10,lsl#8
	eor	r10,r7,r3	@ F_xx_xx
	orr	r9,r9,r11,lsl#16
	add	r4,r4,r5,ror#27	@ E+=ROR(A,27)
	orr	r9,r9,r12,lsl#24
#else
	ldr	r9,[r1],#4	@ handles unaligned
	add	r4,r8,r4,ror#2	@ E+=K_00_19
	eor	r10,r7,r3	@ F_xx_xx
	add	r4,r4,r5,ror#27	@ E+=ROR(A,27)
#ifdef __ARMEL__
	rev	r9,r9	@ byte swap
#endif
#endif
	and	r10,r6,r10,ror#2
	add	r4,r4,r9	@ E+=X[i]
	eor	r10,r10,r3,ror#2	@ F_00_19(B,C,D)
	str	r9,[r14,#-4]!
	add	r4,r4,r10	@ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
	ldrb	r10,[r1,#2]
	ldrb	r9,[r1,#3]
	ldrb	r11,[r1,#1]
	add	r3,r8,r3,ror#2	@ E+=K_00_19
	ldrb	r12,[r1],#4
	orr	r9,r9,r10,lsl#8
	eor	r10,r6,r7	@ F_xx_xx
	orr	r9,r9,r11,lsl#16
	add	r3,r3,r4,ror#27	@ E+=ROR(A,27)
	orr	r9,r9,r12,lsl#24
#else
	ldr	r9,[r1],#4	@ handles unaligned
	add	r3,r8,r3,ror#2	@ E+=K_00_19
	eor	r10,r6,r7	@ F_xx_xx
	add	r3,r3,r4,ror#27	@ E+=ROR(A,27)
#ifdef __ARMEL__
	rev	r9,r9	@ byte swap
#endif
#endif
	and	r10,r5,r10,ror#2
	add	r3,r3,r9	@ E+=X[i]
	eor	r10,r10,r7,ror#2	@ F_00_19(B,C,D)
	str	r9,[r14,#-4]!
	add	r3,r3,r10	@ E+=F_00_19(B,C,D)
	cmp	r14,sp
	bne	.L_00_15	@ [((11+4)*5+2)*3]
	sub	sp,sp,#25*4
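@ The loop above stores 15 message words, covering rounds 0-14; round 15
@ follows inline below (still loading from the message), and rounds 16-19
@ are the first to use the X[] schedule update that starts after it.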
#if __ARM_ARCH__<7
	ldrb	r10,[r1,#2]
	ldrb	r9,[r1,#3]
	ldrb	r11,[r1,#1]
	add	r7,r8,r7,ror#2	@ E+=K_00_19
	ldrb	r12,[r1],#4
	orr	r9,r9,r10,lsl#8
	eor	r10,r5,r6	@ F_xx_xx
	orr	r9,r9,r11,lsl#16
	add	r7,r7,r3,ror#27	@ E+=ROR(A,27)
	orr	r9,r9,r12,lsl#24
#else
	ldr	r9,[r1],#4	@ handles unaligned
	add	r7,r8,r7,ror#2	@ E+=K_00_19
	eor	r10,r5,r6	@ F_xx_xx
	add	r7,r7,r3,ror#27	@ E+=ROR(A,27)
#ifdef __ARMEL__
	rev	r9,r9	@ byte swap
#endif
#endif
	and	r10,r4,r10,ror#2
	add	r7,r7,r9	@ E+=X[i]
	eor	r10,r10,r6,ror#2	@ F_00_19(B,C,D)
	str	r9,[r14,#-4]!
	add	r7,r7,r10	@ E+=F_00_19(B,C,D)
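@ From round 16 onwards X[i] comes from the SHA-1 message schedule:
@	X[i] = ROL(X[i-3] ^ X[i-8] ^ X[i-14] ^ X[i-16], 1)
@ r14 points at the most recently stored word, so the loads at offsets
@ #2*4, #7*4, #13*4 and #15*4 fetch X[i-3], X[i-8], X[i-14] and X[i-16],
@ and the pair of ror#31 operands implement the rotate left by one.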
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r6,r8,r6,ror#2	@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r4,r5	@ F_xx_xx
	mov	r9,r9,ror#31
	add	r6,r6,r7,ror#27	@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	and	r10,r3,r10,ror#2	@ F_xx_xx
	@ F_xx_xx
	add	r6,r6,r9	@ E+=X[i]
	eor	r10,r10,r5,ror#2	@ F_00_19(B,C,D)
	add	r6,r6,r10	@ E+=F_00_19(B,C,D)
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r5,r8,r5,ror#2	@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r3,r4	@ F_xx_xx
	mov	r9,r9,ror#31
	add	r5,r5,r6,ror#27	@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	and	r10,r7,r10,ror#2	@ F_xx_xx
	@ F_xx_xx
	add	r5,r5,r9	@ E+=X[i]
	eor	r10,r10,r4,ror#2	@ F_00_19(B,C,D)
	add	r5,r5,r10	@ E+=F_00_19(B,C,D)
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r4,r8,r4,ror#2	@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r7,r3	@ F_xx_xx
	mov	r9,r9,ror#31
	add	r4,r4,r5,ror#27	@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	and	r10,r6,r10,ror#2	@ F_xx_xx
	@ F_xx_xx
	add	r4,r4,r9	@ E+=X[i]
	eor	r10,r10,r3,ror#2	@ F_00_19(B,C,D)
	add	r4,r4,r10	@ E+=F_00_19(B,C,D)
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r3,r8,r3,ror#2	@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r6,r7	@ F_xx_xx
	mov	r9,r9,ror#31
	add	r3,r3,r4,ror#27	@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	and	r10,r5,r10,ror#2	@ F_xx_xx
	@ F_xx_xx
	add	r3,r3,r9	@ E+=X[i]
	eor	r10,r10,r7,ror#2	@ F_00_19(B,C,D)
	add	r3,r3,r10	@ E+=F_00_19(B,C,D)
	ldr	r8,.LK_20_39	@ [+15+16*4]
	cmn	sp,#0	@ [+3], clear carry to denote 20_39
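@ Rounds 20-39 and 60-79 use the same boolean function F(B,C,D) = B^C^D,
@ so both phases share the loop below and differ only in the constant
@ held in r8. The carry flag tells the two passes apart: it is cleared
@ here for 20_39 and set before the 60_79 pass (cmp sp,#0 below), the
@ teq at the bottom of the loop leaves it untouched, and bcs branches
@ to .L_done only after the second pass.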
.L_20_39_or_60_79:
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r7,r8,r7,ror#2	@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r5,r6	@ F_xx_xx
	mov	r9,r9,ror#31
	add	r7,r7,r3,ror#27	@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	eor	r10,r4,r10,ror#2	@ F_xx_xx
	@ F_xx_xx
	add	r7,r7,r9	@ E+=X[i]
	add	r7,r7,r10	@ E+=F_20_39(B,C,D)
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r6,r8,r6,ror#2	@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r4,r5	@ F_xx_xx
	mov	r9,r9,ror#31
	add	r6,r6,r7,ror#27	@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	eor	r10,r3,r10,ror#2	@ F_xx_xx
	@ F_xx_xx
	add	r6,r6,r9	@ E+=X[i]
	add	r6,r6,r10	@ E+=F_20_39(B,C,D)
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r5,r8,r5,ror#2	@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r3,r4	@ F_xx_xx
	mov	r9,r9,ror#31
	add	r5,r5,r6,ror#27	@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	eor	r10,r7,r10,ror#2	@ F_xx_xx
	@ F_xx_xx
	add	r5,r5,r9	@ E+=X[i]
	add	r5,r5,r10	@ E+=F_20_39(B,C,D)
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r4,r8,r4,ror#2	@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r7,r3	@ F_xx_xx
	mov	r9,r9,ror#31
	add	r4,r4,r5,ror#27	@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	eor	r10,r6,r10,ror#2	@ F_xx_xx
	@ F_xx_xx
	add	r4,r4,r9	@ E+=X[i]
	add	r4,r4,r10	@ E+=F_20_39(B,C,D)
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r3,r8,r3,ror#2	@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r6,r7	@ F_xx_xx
	mov	r9,r9,ror#31
	add	r3,r3,r4,ror#27	@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	eor	r10,r5,r10,ror#2	@ F_xx_xx
	@ F_xx_xx
	add	r3,r3,r9	@ E+=X[i]
	add	r3,r3,r10	@ E+=F_20_39(B,C,D)
 ARM(	teq	r14,sp	)	@ preserve carry
 THUMB(	mov	r11,sp	)
 THUMB(	teq	r14,r11	)	@ preserve carry
	bne	.L_20_39_or_60_79	@ [+((12+3)*5+2)*4]
	bcs	.L_done	@ [+((12+3)*5+2)*4], spare 300 bytes
	ldr	r8,.LK_40_59
	sub	sp,sp,#20*4	@ [+2]
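@ F_40_59 is the majority function Maj(B,C,D) = (B&C)|(B&D)|(C&D). The
@ loop below evaluates it as (B & (C^D)) + (C & D); the two terms never
@ have a bit set in common, so the addition is equivalent to the or and
@ each half can be folded into E with a plain add.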
.L_40_59:
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r7,r8,r7,ror#2	@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r5,r6	@ F_xx_xx
	mov	r9,r9,ror#31
	add	r7,r7,r3,ror#27	@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	and	r10,r4,r10,ror#2	@ F_xx_xx
	and	r11,r5,r6	@ F_xx_xx
	add	r7,r7,r9	@ E+=X[i]
	add	r7,r7,r10	@ E+=F_40_59(B,C,D)
	add	r7,r7,r11,ror#2
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r6,r8,r6,ror#2	@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r4,r5	@ F_xx_xx
	mov	r9,r9,ror#31
	add	r6,r6,r7,ror#27	@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	and	r10,r3,r10,ror#2	@ F_xx_xx
	and	r11,r4,r5	@ F_xx_xx
	add	r6,r6,r9	@ E+=X[i]
	add	r6,r6,r10	@ E+=F_40_59(B,C,D)
	add	r6,r6,r11,ror#2
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r5,r8,r5,ror#2	@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r3,r4	@ F_xx_xx
	mov	r9,r9,ror#31
	add	r5,r5,r6,ror#27	@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	and	r10,r7,r10,ror#2	@ F_xx_xx
	and	r11,r3,r4	@ F_xx_xx
	add	r5,r5,r9	@ E+=X[i]
	add	r5,r5,r10	@ E+=F_40_59(B,C,D)
	add	r5,r5,r11,ror#2
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r4,r8,r4,ror#2	@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r7,r3	@ F_xx_xx
	mov	r9,r9,ror#31
	add	r4,r4,r5,ror#27	@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	and	r10,r6,r10,ror#2	@ F_xx_xx
	and	r11,r7,r3	@ F_xx_xx
	add	r4,r4,r9	@ E+=X[i]
	add	r4,r4,r10	@ E+=F_40_59(B,C,D)
	add	r4,r4,r11,ror#2
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r3,r8,r3,ror#2	@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12	@ 1 cycle stall
	eor	r10,r6,r7	@ F_xx_xx
	mov	r9,r9,ror#31
	add	r3,r3,r4,ror#27	@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	and	r10,r5,r10,ror#2	@ F_xx_xx
	and	r11,r6,r7	@ F_xx_xx
	add	r3,r3,r9	@ E+=X[i]
	add	r3,r3,r10	@ E+=F_40_59(B,C,D)
	add	r3,r3,r11,ror#2
	cmp	r14,sp
	bne	.L_40_59	@ [+((12+5)*5+2)*4]
	ldr	r8,.LK_60_79
	sub	sp,sp,#20*4
	cmp	sp,#0	@ set carry to denote 60_79
	b	.L_20_39_or_60_79	@ [+4], spare 300 bytes
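@ After all 80 rounds the working variables are folded back into the
@ state at r0 (the ror#2 on C, D and E applies the rotation deferred
@ during the rounds), the 80-word X[] frame is dropped in one go, and
@ the outer loop repeats until r1 reaches the end pointer in r2.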
.L_done:
	add	sp,sp,#80*4	@ "deallocate" stack frame
	ldmia	r0,{r8,r9,r10,r11,r12}
	add	r3,r8,r3
	add	r4,r9,r4
	add	r5,r10,r5,ror#2
	add	r6,r11,r6,ror#2
	add	r7,r12,r7,ror#2
	stmia	r0,{r3,r4,r5,r6,r7}
	teq	r1,r2
	bne	.Lloop	@ [+18], total 1307
	ldmia	sp!,{r4-r12,pc}
.align	2
.LK_00_19:	.word	0x5a827999
.LK_20_39:	.word	0x6ed9eba1
.LK_40_59:	.word	0x8f1bbcdc
.LK_60_79:	.word	0xca62c1d6
ENDPROC(sha1_block_data_order)
.asciz	"SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
.align	2