poly1305-core.S_shipped

#ifndef __KERNEL__
# include "arm_arch.h"
#else
# define __ARM_ARCH__ __LINUX_ARM_ARCH__
# define __ARM_MAX_ARCH__ __LINUX_ARM_ARCH__
# define poly1305_init poly1305_init_arm
# define poly1305_blocks poly1305_blocks_arm
# define poly1305_emit poly1305_emit_arm
.globl poly1305_blocks_neon
#endif
#if defined(__thumb2__)
.syntax unified
.thumb
#else
.code 32
#endif
.text
.globl poly1305_emit
.globl poly1305_blocks
.globl poly1305_init
.type poly1305_init,%function
.align 5
poly1305_init:
.Lpoly1305_init:
stmdb sp!,{r4-r11}
eor r3,r3,r3
cmp r1,#0
str r3,[r0,#0] @ zero hash value
str r3,[r0,#4]
str r3,[r0,#8]
str r3,[r0,#12]
str r3,[r0,#16]
str r3,[r0,#36] @ clear is_base2_26
add r0,r0,#20
#ifdef __thumb2__
it eq
#endif
moveq r0,#0
beq .Lno_key
#if __ARM_MAX_ARCH__>=7
mov r3,#-1
str r3,[r0,#28] @ impossible key power value
# ifndef __KERNEL__
adr r11,.Lpoly1305_init
ldr r12,.LOPENSSL_armcap
# endif
#endif
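@ The 16-byte r half of the key is loaded byte-by-byte (little-endian)
@ into r4-r7 and clamped as Poly1305 requires:
@ r &= 0x0ffffffc0ffffffc0ffffffc0fffffff, i.e. the low word is masked
@ with 0x0fffffff (r10) and the upper three words with 0x0ffffffc (r3),
@ so r1..r3 end up with their low two bits clear.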
ldrb r4,[r1,#0]
mov r10,#0x0fffffff
ldrb r5,[r1,#1]
and r3,r10,#-4 @ 0x0ffffffc
ldrb r6,[r1,#2]
ldrb r7,[r1,#3]
orr r4,r4,r5,lsl#8
ldrb r5,[r1,#4]
orr r4,r4,r6,lsl#16
ldrb r6,[r1,#5]
orr r4,r4,r7,lsl#24
ldrb r7,[r1,#6]
and r4,r4,r10
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
# if !defined(_WIN32)
ldr r12,[r11,r12] @ OPENSSL_armcap_P
# endif
# if defined(__APPLE__) || defined(_WIN32)
ldr r12,[r12]
# endif
#endif
ldrb r8,[r1,#7]
orr r5,r5,r6,lsl#8
ldrb r6,[r1,#8]
orr r5,r5,r7,lsl#16
ldrb r7,[r1,#9]
orr r5,r5,r8,lsl#24
ldrb r8,[r1,#10]
and r5,r5,r3
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
tst r12,#ARMV7_NEON @ check for NEON
# ifdef __thumb2__
adr r9,.Lpoly1305_blocks_neon
adr r11,.Lpoly1305_blocks
it ne
movne r11,r9
adr r12,.Lpoly1305_emit
orr r11,r11,#1 @ thumb-ify addresses
orr r12,r12,#1
# else
add r12,r11,#(.Lpoly1305_emit-.Lpoly1305_init)
ite eq
addeq r11,r11,#(.Lpoly1305_blocks-.Lpoly1305_init)
addne r11,r11,#(.Lpoly1305_blocks_neon-.Lpoly1305_init)
# endif
#endif
ldrb r9,[r1,#11]
orr r6,r6,r7,lsl#8
ldrb r7,[r1,#12]
orr r6,r6,r8,lsl#16
ldrb r8,[r1,#13]
orr r6,r6,r9,lsl#24
ldrb r9,[r1,#14]
and r6,r6,r3
ldrb r10,[r1,#15]
orr r7,r7,r8,lsl#8
str r4,[r0,#0]
orr r7,r7,r9,lsl#16
str r5,[r0,#4]
orr r7,r7,r10,lsl#24
str r6,[r0,#8]
and r7,r7,r3
str r7,[r0,#12]
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
stmia r2,{r11,r12} @ fill functions table
mov r0,#1
#else
mov r0,#0
#endif
.Lno_key:
ldmia sp!,{r4-r11}
#if __ARM_ARCH__>=5
bx lr @ bx lr
#else
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size poly1305_init,.-poly1305_init
.type poly1305_blocks,%function
.align 5
poly1305_blocks:
.Lpoly1305_blocks:
stmdb sp!,{r3-r11,lr}
ands r2,r2,#-16
beq .Lno_data
add r2,r2,r1 @ end pointer
sub sp,sp,#32
#if __ARM_ARCH__<7
ldmia r0,{r4-r12} @ load context
add r0,r0,#20
str r2,[sp,#16] @ offload stuff
str r0,[sp,#12]
#else
ldr lr,[r0,#36] @ is_base2_26
ldmia r0!,{r4-r8} @ load hash value
str r2,[sp,#16] @ offload stuff
str r0,[sp,#12]
adds r9,r4,r5,lsl#26 @ base 2^26 -> base 2^32
mov r10,r5,lsr#6
adcs r10,r10,r6,lsl#20
mov r11,r6,lsr#12
adcs r11,r11,r7,lsl#14
mov r12,r7,lsr#18
adcs r12,r12,r8,lsl#8
mov r2,#0
teq lr,#0
str r2,[r0,#16] @ clear is_base2_26
adc r2,r2,r8,lsr#24
itttt ne
movne r4,r9 @ choose between radixes
movne r5,r10
movne r6,r11
movne r7,r12
ldmia r0,{r9-r12} @ load key
it ne
movne r8,r2
#endif
mov lr,r1
cmp r3,#0
str r10,[sp,#20]
str r11,[sp,#24]
str r12,[sp,#28]
b .Loop
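@ Per-block loop: each 16-byte block is accumulated into the 130-bit
@ hash h (r4-r8, base 2^32, r8 holding the bits above 2^128, plus 2^128
@ when the padbit argument in r3 is non-zero), and h is then multiplied
@ by r modulo 2^130-5 with umull/umlal. The "add rX,rX,rX,lsr#2" steps
@ form 5*rX/4 (legal because the clamp cleared the low two bits of
@ r1..r3); these scaled key words are used for the partial products
@ that wrap past 2^130, since 2^130 == 5 (mod 2^130-5).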
.align 4
.Loop:
#if __ARM_ARCH__<7
ldrb r0,[lr],#16 @ load input
# ifdef __thumb2__
it hi
# endif
addhi r8,r8,#1 @ 1<<128
ldrb r1,[lr,#-15]
ldrb r2,[lr,#-14]
ldrb r3,[lr,#-13]
orr r1,r0,r1,lsl#8
ldrb r0,[lr,#-12]
orr r2,r1,r2,lsl#16
ldrb r1,[lr,#-11]
orr r3,r2,r3,lsl#24
ldrb r2,[lr,#-10]
adds r4,r4,r3 @ accumulate input
ldrb r3,[lr,#-9]
orr r1,r0,r1,lsl#8
ldrb r0,[lr,#-8]
orr r2,r1,r2,lsl#16
ldrb r1,[lr,#-7]
orr r3,r2,r3,lsl#24
ldrb r2,[lr,#-6]
adcs r5,r5,r3
ldrb r3,[lr,#-5]
orr r1,r0,r1,lsl#8
ldrb r0,[lr,#-4]
orr r2,r1,r2,lsl#16
ldrb r1,[lr,#-3]
orr r3,r2,r3,lsl#24
ldrb r2,[lr,#-2]
adcs r6,r6,r3
ldrb r3,[lr,#-1]
orr r1,r0,r1,lsl#8
str lr,[sp,#8] @ offload input pointer
orr r2,r1,r2,lsl#16
add r10,r10,r10,lsr#2
orr r3,r2,r3,lsl#24
#else
ldr r0,[lr],#16 @ load input
it hi
addhi r8,r8,#1 @ padbit
ldr r1,[lr,#-12]
ldr r2,[lr,#-8]
ldr r3,[lr,#-4]
# ifdef __ARMEB__
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
# endif
adds r4,r4,r0 @ accumulate input
str lr,[sp,#8] @ offload input pointer
adcs r5,r5,r1
add r10,r10,r10,lsr#2
adcs r6,r6,r2
#endif
add r11,r11,r11,lsr#2
adcs r7,r7,r3
add r12,r12,r12,lsr#2
umull r2,r3,r5,r9
adc r8,r8,#0
umull r0,r1,r4,r9
umlal r2,r3,r8,r10
umlal r0,r1,r7,r10
ldr r10,[sp,#20] @ reload r10
umlal r2,r3,r6,r12
umlal r0,r1,r5,r12
umlal r2,r3,r7,r11
umlal r0,r1,r6,r11
umlal r2,r3,r4,r10
str r0,[sp,#0] @ future r4
mul r0,r11,r8
ldr r11,[sp,#24] @ reload r11
adds r2,r2,r1 @ d1+=d0>>32
eor r1,r1,r1
adc lr,r3,#0 @ future r6
str r2,[sp,#4] @ future r5
mul r2,r12,r8
eor r3,r3,r3
umlal r0,r1,r7,r12
ldr r12,[sp,#28] @ reload r12
umlal r2,r3,r7,r9
umlal r0,r1,r6,r9
umlal r2,r3,r6,r10
umlal r0,r1,r5,r10
umlal r2,r3,r5,r11
umlal r0,r1,r4,r11
umlal r2,r3,r4,r12
ldr r4,[sp,#0]
mul r8,r9,r8
ldr r5,[sp,#4]
adds r6,lr,r0 @ d2+=d1>>32
ldr lr,[sp,#8] @ reload input pointer
adc r1,r1,#0
adds r7,r2,r1 @ d3+=d2>>32
ldr r0,[sp,#16] @ reload end pointer
adc r3,r3,#0
add r8,r8,r3 @ h4+=d3>>32
and r1,r8,#-4
and r8,r8,#3
add r1,r1,r1,lsr#2 @ *=5
adds r4,r4,r1
adcs r5,r5,#0
adcs r6,r6,#0
adcs r7,r7,#0
adc r8,r8,#0
cmp r0,lr @ done yet?
bhi .Loop
ldr r0,[sp,#12]
add sp,sp,#32
stmdb r0,{r4-r8} @ store the result
.Lno_data:
#if __ARM_ARCH__>=5
ldmia sp!,{r3-r11,pc}
#else
ldmia sp!,{r3-r11,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size poly1305_blocks,.-poly1305_blocks
.type poly1305_emit,%function
.align 5
poly1305_emit:
.Lpoly1305_emit:
stmdb sp!,{r4-r11}
ldmia r0,{r3-r7}
#if __ARM_ARCH__>=7
ldr ip,[r0,#36] @ is_base2_26
adds r8,r3,r4,lsl#26 @ base 2^26 -> base 2^32
mov r9,r4,lsr#6
adcs r9,r9,r5,lsl#20
mov r10,r5,lsr#12
adcs r10,r10,r6,lsl#14
mov r11,r6,lsr#18
adcs r11,r11,r7,lsl#8
mov r0,#0
adc r0,r0,r7,lsr#24
tst ip,ip
itttt ne
movne r3,r8
movne r4,r9
movne r5,r10
movne r6,r11
it ne
movne r7,r0
#endif
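@ Final reduction and output: compute h+5 with carries; if the result
@ reaches 2^130 (bit 2 of the top word is set), then h >= 2^130-5 and
@ the h+5 words are selected instead, which equals h mod 2^130-5 when
@ truncated to 128 bits. The 128-bit value pointed to by r2 (the s half
@ of the key) is then added and the tag is stored little-endian.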
adds r8,r3,#5 @ compare to modulus
adcs r9,r4,#0
adcs r10,r5,#0
adcs r11,r6,#0
adc r0,r7,#0
tst r0,#4 @ did it carry/borrow?
#ifdef __thumb2__
it ne
#endif
movne r3,r8
ldr r8,[r2,#0]
#ifdef __thumb2__
it ne
#endif
movne r4,r9
ldr r9,[r2,#4]
#ifdef __thumb2__
it ne
#endif
movne r5,r10
ldr r10,[r2,#8]
#ifdef __thumb2__
it ne
#endif
movne r6,r11
ldr r11,[r2,#12]
adds r3,r3,r8
adcs r4,r4,r9
adcs r5,r5,r10
adc r6,r6,r11
#if __ARM_ARCH__>=7
# ifdef __ARMEB__
rev r3,r3
rev r4,r4
rev r5,r5
rev r6,r6
# endif
str r3,[r1,#0]
str r4,[r1,#4]
str r5,[r1,#8]
str r6,[r1,#12]
#else
strb r3,[r1,#0]
mov r3,r3,lsr#8
strb r4,[r1,#4]
mov r4,r4,lsr#8
strb r5,[r1,#8]
mov r5,r5,lsr#8
strb r6,[r1,#12]
mov r6,r6,lsr#8
strb r3,[r1,#1]
mov r3,r3,lsr#8
strb r4,[r1,#5]
mov r4,r4,lsr#8
strb r5,[r1,#9]
mov r5,r5,lsr#8
strb r6,[r1,#13]
mov r6,r6,lsr#8
strb r3,[r1,#2]
mov r3,r3,lsr#8
strb r4,[r1,#6]
mov r4,r4,lsr#8
strb r5,[r1,#10]
mov r5,r5,lsr#8
strb r6,[r1,#14]
mov r6,r6,lsr#8
strb r3,[r1,#3]
strb r4,[r1,#7]
strb r5,[r1,#11]
strb r6,[r1,#15]
#endif
ldmia sp!,{r4-r11}
#if __ARM_ARCH__>=5
bx lr @ bx lr
#else
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size poly1305_emit,.-poly1305_emit
#if __ARM_MAX_ARCH__>=7
.fpu neon
.type poly1305_init_neon,%function
.align 5
poly1305_init_neon:
.Lpoly1305_init_neon:
ldr r3,[r0,#48] @ first table element
cmp r3,#-1 @ is value impossible?
bne .Lno_init_neon
ldr r4,[r0,#20] @ load key base 2^32
ldr r5,[r0,#24]
ldr r6,[r0,#28]
ldr r7,[r0,#32]
and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26
mov r3,r4,lsr#26
mov r4,r5,lsr#20
orr r3,r3,r5,lsl#6
mov r5,r6,lsr#14
orr r4,r4,r6,lsl#12
mov r6,r7,lsr#8
orr r5,r5,r7,lsl#18
and r3,r3,#0x03ffffff
and r4,r4,#0x03ffffff
and r5,r5,#0x03ffffff
vdup.32 d0,r2 @ r^1 in both lanes
add r2,r3,r3,lsl#2 @ *5
vdup.32 d1,r3
add r3,r4,r4,lsl#2
vdup.32 d2,r2
vdup.32 d3,r4
add r4,r5,r5,lsl#2
vdup.32 d4,r3
vdup.32 d5,r5
add r5,r6,r6,lsl#2
vdup.32 d6,r4
vdup.32 d7,r6
vdup.32 d8,r5
mov r5,#2 @ counter
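@ The loop below squares r twice: the first pass turns r^1:r^1 (r^1 in
@ both lanes of d0-d8) into r^2:r^1, the second pass into r^4:r^3. Each
@ power is written to the key-power table at offset 48, one 9-word row
@ per power: the five 26-bit limbs interleaved with 5x multiples of the
@ upper four limbs, laid out for the vld4.32/vld1.32 loads used by the
@ NEON block loop.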
.Lsquare_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
@ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
@ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
@ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
@ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
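@ With h and r split into five 26-bit limbs this is schoolbook
@ multiplication reduced mod 2^130-5: limb products whose weight is
@ 2^130 or more wrap around, and since 2^130 == 5 (mod 2^130-5) they
@ reappear multiplied by 5, e.g. the h4*r1 term (weight 2^130)
@ contributes 5*h4*r1 to d0.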
vmull.u32 q5,d0,d0[1]
vmull.u32 q6,d1,d0[1]
vmull.u32 q7,d3,d0[1]
vmull.u32 q8,d5,d0[1]
vmull.u32 q9,d7,d0[1]
vmlal.u32 q5,d7,d2[1]
vmlal.u32 q6,d0,d1[1]
vmlal.u32 q7,d1,d1[1]
vmlal.u32 q8,d3,d1[1]
vmlal.u32 q9,d5,d1[1]
vmlal.u32 q5,d5,d4[1]
vmlal.u32 q6,d7,d4[1]
vmlal.u32 q8,d1,d3[1]
vmlal.u32 q7,d0,d3[1]
vmlal.u32 q9,d3,d3[1]
vmlal.u32 q5,d3,d6[1]
vmlal.u32 q8,d0,d5[1]
vmlal.u32 q6,d5,d6[1]
vmlal.u32 q7,d7,d6[1]
vmlal.u32 q9,d1,d5[1]
vmlal.u32 q8,d7,d8[1]
vmlal.u32 q5,d1,d8[1]
vmlal.u32 q6,d3,d8[1]
vmlal.u32 q7,d5,d8[1]
vmlal.u32 q9,d0,d7[1]
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
@ and P. Schwabe
@
@ H0>>+H1>>+H2>>+H3>>+H4
@ H3>>+H4>>*5+H0>>+H1
@
@ Trivia.
@
@ Result of multiplication of n-bit number by m-bit number is
@ n+m bits wide. However! Even though 2^n is an (n+1)-bit number,
@ m-bit number multiplied by 2^n is still n+m bits wide.
@
@ Sum of two n-bit numbers is n+1 bits wide, sum of three - n+2,
@ and so is sum of four. Sum of 2^m n-m-bit numbers and n-bit
@ one is n+1 bits wide.
@
@ >>+ denotes Hnext += Hn>>26, Hn &= 0x3ffffff. This means that
@ H0, H2, H3 are guaranteed to be 26 bits wide, while H1 and H4
@ can be 27. However! In cases when their width exceeds 26 bits
@ they are limited by 2^26+2^6. This in turn means that *sum*
@ of the products with these values can still be viewed as sum
@ of 52-bit numbers as long as the amount of addends is not a
@ power of 2. For example,
@
@ H4 = H4*R0 + H3*R1 + H2*R2 + H1*R3 + H0 * R4,
@
@ which can't be larger than 5 * (2^26 + 2^6) * (2^26 + 2^6), or
@ 5 * (2^52 + 2*2^32 + 2^12), which in turn is smaller than
@ 8 * (2^52) or 2^55. However, the value is then multiplied
@ by 5, so we should be looking at 5 * 5 * (2^52 + 2^33 + 2^12),
@ which is less than 32 * (2^52) or 2^57. And when processing
@ data we are looking at triple as many addends...
@
@ In key setup procedure pre-reduced H0 is limited by 5*4+1 and
@ 5*H4 - by 5*5 52-bit addends, or 57 bits. But when hashing the
@ input H0 is limited by (5*4+1)*3 addends, or 58 bits, while
@ 5*H4 by 5*5*3, or 59[!] bits. How is this relevant? vmlal.u32
@ instruction accepts 2x32-bit input and writes 2x64-bit result.
@ This means that the result of reduction has to be compressed upon
@ loop wrap-around. This can be done in the process of reduction
@ to minimize amount of instructions [as well as amount of
@ 128-bit instructions, which benefits low-end processors], but
@ one has to watch for H2 (which is narrower than H0) and 5*H4
@ not being wider than 58 bits, so that result of right shift
@ by 26 bits fits in 32 bits. This is also useful on x86,
@ because it allows using paddd in place of paddq, which
@ benefits Atom, where paddq is ridiculously slow.
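@ In the carry chain below, the carry out of h4 represents multiples of
@ 2^130 and is folded back into h0 multiplied by 5 (again because
@ 2^130 == 5 mod 2^130-5); that is the d10 += d30; d30 <<= 2;
@ d10 += d30 sequence, i.e. adding carry plus 4*carry.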
vshr.u64 q15,q8,#26
vmovn.i64 d16,q8
vshr.u64 q4,q5,#26
vmovn.i64 d10,q5
vadd.i64 q9,q9,q15 @ h3 -> h4
vbic.i32 d16,#0xfc000000 @ &=0x03ffffff
vadd.i64 q6,q6,q4 @ h0 -> h1
vbic.i32 d10,#0xfc000000
vshrn.u64 d30,q9,#26
vmovn.i64 d18,q9
vshr.u64 q4,q6,#26
vmovn.i64 d12,q6
vadd.i64 q7,q7,q4 @ h1 -> h2
vbic.i32 d18,#0xfc000000
vbic.i32 d12,#0xfc000000
vadd.i32 d10,d10,d30
vshl.u32 d30,d30,#2
vshrn.u64 d8,q7,#26
vmovn.i64 d14,q7
vadd.i32 d10,d10,d30 @ h4 -> h0
vadd.i32 d16,d16,d8 @ h2 -> h3
vbic.i32 d14,#0xfc000000
vshr.u32 d30,d10,#26
vbic.i32 d10,#0xfc000000
vshr.u32 d8,d16,#26
vbic.i32 d16,#0xfc000000
vadd.i32 d12,d12,d30 @ h0 -> h1
vadd.i32 d18,d18,d8 @ h3 -> h4
subs r5,r5,#1
beq .Lsquare_break_neon
add r6,r0,#(48+0*9*4)
add r7,r0,#(48+1*9*4)
vtrn.32 d0,d10 @ r^2:r^1
vtrn.32 d3,d14
vtrn.32 d5,d16
vtrn.32 d1,d12
vtrn.32 d7,d18
vshl.u32 d4,d3,#2 @ *5
vshl.u32 d6,d5,#2
vshl.u32 d2,d1,#2
vshl.u32 d8,d7,#2
vadd.i32 d4,d4,d3
vadd.i32 d2,d2,d1
vadd.i32 d6,d6,d5
vadd.i32 d8,d8,d7
vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]!
vst4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]!
vst4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
vst4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
vst1.32 {d8[0]},[r6,:32]
vst1.32 {d8[1]},[r7,:32]
b .Lsquare_neon
.align 4
.Lsquare_break_neon:
add r6,r0,#(48+2*4*9)
add r7,r0,#(48+3*4*9)
vmov d0,d10 @ r^4:r^3
vshl.u32 d2,d12,#2 @ *5
vmov d1,d12
vshl.u32 d4,d14,#2
vmov d3,d14
vshl.u32 d6,d16,#2
vmov d5,d16
vshl.u32 d8,d18,#2
vmov d7,d18
vadd.i32 d2,d2,d12
vadd.i32 d4,d4,d14
vadd.i32 d6,d6,d16
vadd.i32 d8,d8,d18
vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]!
vst4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]!
vst4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
vst4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
vst1.32 {d8[0]},[r6]
vst1.32 {d8[1]},[r7]
.Lno_init_neon:
bx lr @ bx lr
.size poly1305_init_neon,.-poly1305_init_neon
.type poly1305_blocks_neon,%function
.align 5
poly1305_blocks_neon:
.Lpoly1305_blocks_neon:
ldr ip,[r0,#36] @ is_base2_26
cmp r2,#64
blo .Lpoly1305_blocks
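@ Inputs shorter than 64 bytes are not worth the vector setup and are
@ handed to the scalar poly1305_blocks above. On the first NEON call
@ (is_base2_26 still clear) poly1305_init_neon fills the r^1..r^4 power
@ table and the hash is converted from base 2^32 to five 26-bit limbs,
@ with is_base2_26 set; later calls branch to .Lbase2_26_neon and simply
@ reload the base 2^26 hash.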
stmdb sp!,{r4-r7}
vstmdb sp!,{d8-d15} @ ABI specification says so
tst ip,ip @ is_base2_26?
bne .Lbase2_26_neon
stmdb sp!,{r1-r3,lr}
bl .Lpoly1305_init_neon
ldr r4,[r0,#0] @ load hash value base 2^32
ldr r5,[r0,#4]
ldr r6,[r0,#8]
ldr r7,[r0,#12]
ldr ip,[r0,#16]
and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26
mov r3,r4,lsr#26
veor d10,d10,d10
mov r4,r5,lsr#20
orr r3,r3,r5,lsl#6
veor d12,d12,d12
mov r5,r6,lsr#14
orr r4,r4,r6,lsl#12
veor d14,d14,d14
mov r6,r7,lsr#8
orr r5,r5,r7,lsl#18
veor d16,d16,d16
and r3,r3,#0x03ffffff
orr r6,r6,ip,lsl#24
veor d18,d18,d18
and r4,r4,#0x03ffffff
mov r1,#1
and r5,r5,#0x03ffffff
str r1,[r0,#36] @ set is_base2_26
vmov.32 d10[0],r2
vmov.32 d12[0],r3
vmov.32 d14[0],r4
vmov.32 d16[0],r5
vmov.32 d18[0],r6
adr r5,.Lzeros
ldmia sp!,{r1-r3,lr}
b .Lhash_loaded
.align 4
.Lbase2_26_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ load hash value
veor d10,d10,d10
veor d12,d12,d12
veor d14,d14,d14
veor d16,d16,d16
veor d18,d18,d18
vld4.32 {d10[0],d12[0],d14[0],d16[0]},[r0]!
adr r5,.Lzeros
vld1.32 {d18[0]},[r0]
sub r0,r0,#16 @ rewind
.Lhash_loaded:
add r4,r1,#32
mov r3,r3,lsl#24
tst r2,#31
beq .Leven
vld4.32 {d20[0],d22[0],d24[0],d26[0]},[r1]!
vmov.32 d28[0],r3
sub r2,r2,#16
add r4,r1,#32
# ifdef __ARMEB__
vrev32.8 q10,q10
vrev32.8 q13,q13
vrev32.8 q11,q11
vrev32.8 q12,q12
# endif
vsri.u32 d28,d26,#8 @ base 2^32 -> base 2^26
vshl.u32 d26,d26,#18
vsri.u32 d26,d24,#14
vshl.u32 d24,d24,#12
vadd.i32 d29,d28,d18 @ add hash value and move to #hi
vbic.i32 d26,#0xfc000000
vsri.u32 d24,d22,#20
vshl.u32 d22,d22,#6
vbic.i32 d24,#0xfc000000
vsri.u32 d22,d20,#26
vadd.i32 d27,d26,d16
vbic.i32 d20,#0xfc000000
vbic.i32 d22,#0xfc000000
vadd.i32 d25,d24,d14
vadd.i32 d21,d20,d10
vadd.i32 d23,d22,d12
mov r7,r5
add r6,r0,#48
cmp r2,r2
b .Long_tail
.align 4
.Leven:
subs r2,r2,#64
it lo
movlo r4,r5
vmov.i32 q14,#1<<24 @ padbit, yes, always
vld4.32 {d20,d22,d24,d26},[r1] @ inp[0:1]
add r1,r1,#64
vld4.32 {d21,d23,d25,d27},[r4] @ inp[2:3] (or 0)
add r4,r4,#64
itt hi
addhi r7,r0,#(48+1*9*4)
addhi r6,r0,#(48+3*9*4)
# ifdef __ARMEB__
vrev32.8 q10,q10
vrev32.8 q13,q13
vrev32.8 q11,q11
vrev32.8 q12,q12
# endif
vsri.u32 q14,q13,#8 @ base 2^32 -> base 2^26
vshl.u32 q13,q13,#18
vsri.u32 q13,q12,#14
vshl.u32 q12,q12,#12
vbic.i32 q13,#0xfc000000
vsri.u32 q12,q11,#20
vshl.u32 q11,q11,#6
vbic.i32 q12,#0xfc000000
vsri.u32 q11,q10,#26
vbic.i32 q10,#0xfc000000
vbic.i32 q11,#0xfc000000
bls .Lskip_loop
vld4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! @ load r^2
vld4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! @ load r^4
vld4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
vld4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
b .Loop_neon
.align 5
.Loop_neon:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
@ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
@ ___________________/
@ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
@ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
@ ___________________/ ____________________/
@
@ Note that we start with inp[2:3]*r^2. This is because it
@ doesn't depend on reduction in previous iteration.
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
@ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
@ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
@ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
@ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
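@ Lane layout: element [1] of d0-d8 holds the r^2 power limbs and
@ multiplies the freshly loaded inp[2:3] pair, element [0] holds r^4
@ and multiplies hash+inp[0:1], so each iteration absorbs four 16-byte
@ blocks across the two parallel lanes.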
@ inp[2:3]*r^2
vadd.i32 d24,d24,d14 @ accumulate inp[0:1]
vmull.u32 q7,d25,d0[1]
vadd.i32 d20,d20,d10
vmull.u32 q5,d21,d0[1]
vadd.i32 d26,d26,d16
vmull.u32 q8,d27,d0[1]
vmlal.u32 q7,d23,d1[1]
vadd.i32 d22,d22,d12
vmull.u32 q6,d23,d0[1]
vadd.i32 d28,d28,d18
vmull.u32 q9,d29,d0[1]
subs r2,r2,#64
vmlal.u32 q5,d29,d2[1]
it lo
movlo r4,r5
vmlal.u32 q8,d25,d1[1]
vld1.32 d8[1],[r7,:32]
vmlal.u32 q6,d21,d1[1]
vmlal.u32 q9,d27,d1[1]
vmlal.u32 q5,d27,d4[1]
vmlal.u32 q8,d23,d3[1]
vmlal.u32 q9,d25,d3[1]
vmlal.u32 q6,d29,d4[1]
vmlal.u32 q7,d21,d3[1]
vmlal.u32 q8,d21,d5[1]
vmlal.u32 q5,d25,d6[1]
vmlal.u32 q9,d23,d5[1]
vmlal.u32 q6,d27,d6[1]
vmlal.u32 q7,d29,d6[1]
vmlal.u32 q8,d29,d8[1]
vmlal.u32 q5,d23,d8[1]
vmlal.u32 q9,d21,d7[1]
vmlal.u32 q6,d25,d8[1]
vmlal.u32 q7,d27,d8[1]
vld4.32 {d21,d23,d25,d27},[r4] @ inp[2:3] (or 0)
add r4,r4,#64
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ (hash+inp[0:1])*r^4 and accumulate
vmlal.u32 q8,d26,d0[0]
vmlal.u32 q5,d20,d0[0]
vmlal.u32 q9,d28,d0[0]
vmlal.u32 q6,d22,d0[0]
vmlal.u32 q7,d24,d0[0]
vld1.32 d8[0],[r6,:32]
vmlal.u32 q8,d24,d1[0]
vmlal.u32 q5,d28,d2[0]
vmlal.u32 q9,d26,d1[0]
vmlal.u32 q6,d20,d1[0]
vmlal.u32 q7,d22,d1[0]
vmlal.u32 q8,d22,d3[0]
vmlal.u32 q5,d26,d4[0]
vmlal.u32 q9,d24,d3[0]
vmlal.u32 q6,d28,d4[0]
vmlal.u32 q7,d20,d3[0]
vmlal.u32 q8,d20,d5[0]
vmlal.u32 q5,d24,d6[0]
vmlal.u32 q9,d22,d5[0]
vmlal.u32 q6,d26,d6[0]
vmlal.u32 q8,d28,d8[0]
vmlal.u32 q7,d28,d6[0]
vmlal.u32 q5,d22,d8[0]
vmlal.u32 q9,d20,d7[0]
vmov.i32 q14,#1<<24 @ padbit, yes, always
vmlal.u32 q6,d24,d8[0]
vmlal.u32 q7,d26,d8[0]
vld4.32 {d20,d22,d24,d26},[r1] @ inp[0:1]
add r1,r1,#64
# ifdef __ARMEB__
vrev32.8 q10,q10
vrev32.8 q11,q11
vrev32.8 q12,q12
vrev32.8 q13,q13
# endif
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ lazy reduction interleaved with base 2^32 -> base 2^26 of
@ inp[0:3] previously loaded to q10-q13 and smashed to q10-q14.
vshr.u64 q15,q8,#26
vmovn.i64 d16,q8
vshr.u64 q4,q5,#26
vmovn.i64 d10,q5
vadd.i64 q9,q9,q15 @ h3 -> h4
vbic.i32 d16,#0xfc000000
vsri.u32 q14,q13,#8 @ base 2^32 -> base 2^26
vadd.i64 q6,q6,q4 @ h0 -> h1
vshl.u32 q13,q13,#18
vbic.i32 d10,#0xfc000000
vshrn.u64 d30,q9,#26
vmovn.i64 d18,q9
vshr.u64 q4,q6,#26
vmovn.i64 d12,q6
vadd.i64 q7,q7,q4 @ h1 -> h2
vsri.u32 q13,q12,#14
vbic.i32 d18,#0xfc000000
vshl.u32 q12,q12,#12
vbic.i32 d12,#0xfc000000
vadd.i32 d10,d10,d30
vshl.u32 d30,d30,#2
vbic.i32 q13,#0xfc000000
vshrn.u64 d8,q7,#26
vmovn.i64 d14,q7
vaddl.u32 q5,d10,d30 @ h4 -> h0 [widen for a sec]
vsri.u32 q12,q11,#20
vadd.i32 d16,d16,d8 @ h2 -> h3
vshl.u32 q11,q11,#6
vbic.i32 d14,#0xfc000000
vbic.i32 q12,#0xfc000000
vshrn.u64 d30,q5,#26 @ re-narrow
vmovn.i64 d10,q5
vsri.u32 q11,q10,#26
vbic.i32 q10,#0xfc000000
vshr.u32 d8,d16,#26
vbic.i32 d16,#0xfc000000
vbic.i32 d10,#0xfc000000
vadd.i32 d12,d12,d30 @ h0 -> h1
vadd.i32 d18,d18,d8 @ h3 -> h4
vbic.i32 q11,#0xfc000000
bhi .Loop_neon
.Lskip_loop:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
add r7,r0,#(48+0*9*4)
add r6,r0,#(48+1*9*4)
adds r2,r2,#32
it ne
movne r2,#0
bne .Long_tail
vadd.i32 d25,d24,d14 @ add hash value and move to #hi
vadd.i32 d21,d20,d10
vadd.i32 d27,d26,d16
vadd.i32 d23,d22,d12
vadd.i32 d29,d28,d18
.Long_tail:
vld4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! @ load r^1
vld4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! @ load r^2
vadd.i32 d24,d24,d14 @ can be redundant
vmull.u32 q7,d25,d0
vadd.i32 d20,d20,d10
vmull.u32 q5,d21,d0
vadd.i32 d26,d26,d16
vmull.u32 q8,d27,d0
vadd.i32 d22,d22,d12
vmull.u32 q6,d23,d0
vadd.i32 d28,d28,d18
vmull.u32 q9,d29,d0
vmlal.u32 q5,d29,d2
vld4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
vmlal.u32 q8,d25,d1
vld4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
vmlal.u32 q6,d21,d1
vmlal.u32 q9,d27,d1
vmlal.u32 q7,d23,d1
vmlal.u32 q8,d23,d3
vld1.32 d8[1],[r7,:32]
vmlal.u32 q5,d27,d4
vld1.32 d8[0],[r6,:32]
vmlal.u32 q9,d25,d3
vmlal.u32 q6,d29,d4
vmlal.u32 q7,d21,d3
vmlal.u32 q8,d21,d5
it ne
addne r7,r0,#(48+2*9*4)
vmlal.u32 q5,d25,d6
it ne
addne r6,r0,#(48+3*9*4)
vmlal.u32 q9,d23,d5
vmlal.u32 q6,d27,d6
vmlal.u32 q7,d29,d6
vmlal.u32 q8,d29,d8
vorn q0,q0,q0 @ all-ones, can be redundant
vmlal.u32 q5,d23,d8
vshr.u64 q0,q0,#38
vmlal.u32 q9,d21,d7
vmlal.u32 q6,d25,d8
vmlal.u32 q7,d27,d8
beq .Lshort_tail
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ (hash+inp[0:1])*r^4:r^3 and accumulate
vld4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! @ load r^3
vld4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! @ load r^4
vmlal.u32 q7,d24,d0
vmlal.u32 q5,d20,d0
vmlal.u32 q8,d26,d0
vmlal.u32 q6,d22,d0
vmlal.u32 q9,d28,d0
vmlal.u32 q5,d28,d2
vld4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
vmlal.u32 q8,d24,d1
vld4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
vmlal.u32 q6,d20,d1
vmlal.u32 q9,d26,d1
vmlal.u32 q7,d22,d1
vmlal.u32 q8,d22,d3
vld1.32 d8[1],[r7,:32]
vmlal.u32 q5,d26,d4
vld1.32 d8[0],[r6,:32]
vmlal.u32 q9,d24,d3
vmlal.u32 q6,d28,d4
vmlal.u32 q7,d20,d3
vmlal.u32 q8,d20,d5
vmlal.u32 q5,d24,d6
vmlal.u32 q9,d22,d5
vmlal.u32 q6,d26,d6
vmlal.u32 q7,d28,d6
vmlal.u32 q8,d28,d8
vorn q0,q0,q0 @ all-ones
vmlal.u32 q5,d22,d8
vshr.u64 q0,q0,#38
vmlal.u32 q9,d20,d7
vmlal.u32 q6,d24,d8
vmlal.u32 q7,d26,d8
.Lshort_tail:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ horizontal addition
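@ (each 64-bit lane of q5-q9 accumulates one of the two interleaved
@ block streams; adding the two halves of every q register merges them
@ into a single set of h0-h4 sums before the final lazy reduction)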
vadd.i64 d16,d16,d17
vadd.i64 d10,d10,d11
vadd.i64 d18,d18,d19
vadd.i64 d12,d12,d13
vadd.i64 d14,d14,d15
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ lazy reduction, but without narrowing
vshr.u64 q15,q8,#26
vand.i64 q8,q8,q0
vshr.u64 q4,q5,#26
vand.i64 q5,q5,q0
vadd.i64 q9,q9,q15 @ h3 -> h4
vadd.i64 q6,q6,q4 @ h0 -> h1
vshr.u64 q15,q9,#26
vand.i64 q9,q9,q0
vshr.u64 q4,q6,#26
vand.i64 q6,q6,q0
vadd.i64 q7,q7,q4 @ h1 -> h2
vadd.i64 q5,q5,q15
vshl.u64 q15,q15,#2
vshr.u64 q4,q7,#26
vand.i64 q7,q7,q0
vadd.i64 q5,q5,q15 @ h4 -> h0
vadd.i64 q8,q8,q4 @ h2 -> h3
vshr.u64 q15,q5,#26
vand.i64 q5,q5,q0
vshr.u64 q4,q8,#26
vand.i64 q8,q8,q0
vadd.i64 q6,q6,q15 @ h0 -> h1
vadd.i64 q9,q9,q4 @ h3 -> h4
cmp r2,#0
bne .Leven
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ store hash value
vst4.32 {d10[0],d12[0],d14[0],d16[0]},[r0]!
vst1.32 {d18[0]},[r0]
vldmia sp!,{d8-d15} @ epilogue
ldmia sp!,{r4-r7}
bx lr @ bx lr
.size poly1305_blocks_neon,.-poly1305_blocks_neon
.align 5
.Lzeros:
.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
#ifndef __KERNEL__
.LOPENSSL_armcap:
# ifdef _WIN32
.word OPENSSL_armcap_P
# else
.word OPENSSL_armcap_P-.Lpoly1305_init
# endif
.comm OPENSSL_armcap_P,4,4
.hidden OPENSSL_armcap_P
#endif
#endif
.asciz "Poly1305 for ARMv4/NEON, CRYPTOGAMS by @dot-asm"
.align 2