/* SPDX-License-Identifier: GPL-2.0 */
/* checksum.S: Sparc V9 optimized checksum code.
 *
 *  Copyright(C) 1995 Linus Torvalds
 *  Copyright(C) 1995 Miguel de Icaza
 *  Copyright(C) 1996, 2000 David S. Miller
 *  Copyright(C) 1997 Jakub Jelinek
 *
 * derived from:
 *	Linux/Alpha checksum c-code
 *	Linux/ix86 inline checksum assembly
 *	RFC1071 Computing the Internet Checksum (esp. Jacobson's m68k code)
 *	David Mosberger-Tang for optimized reference c-code
 *	BSD4.4 portable checksum routine
 */
#include <asm/export.h>
	.text
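
/* Alignment fixup, reached from csum_partial when buff is not
 * 32-bit aligned.  The andcc in csum_partial's branch delay slot
 * has already set %icc and %g7 from the low address bit.  Fold a
 * leading byte and/or halfword into the accumulator so that the
 * main loop can use aligned 32-bit loads.
 */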
csum_partial_fix_alignment:
	/* We checked for zero length already, so there must be
	 * at least one byte.
	 */
	be,pt		%icc, 1f
	 nop
	ldub		[%o0 + 0x00], %o4
	add		%o0, 1, %o0
	sub		%o1, 1, %o1
1:	andcc		%o0, 0x2, %g0
	be,pn		%icc, csum_partial_post_align
	 cmp		%o1, 2
	blu,pn		%icc, csum_partial_end_cruft
	 nop
	lduh		[%o0 + 0x00], %o5
	add		%o0, 2, %o0
	sub		%o1, 2, %o1
	ba,pt		%xcc, csum_partial_post_align
	 add		%o5, %o4, %o4
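
/* csum_partial(buff, len, sum):  %o0 = buff, %o1 = len, %o2 = sum
 *
 * Accumulate a 32-bit ones-complement partial checksum of the len
 * bytes at buff into sum and return the result.  A rough sketch of
 * the effect, assuming the usual csum_partial semantics (the generic
 * C version in lib/checksum.c is the reference, not this comment):
 *
 *	u64 acc = 0;
 *	for (each 16-bit big-endian word w covering buff[0..len))
 *		acc += w;
 *	acc = fold_to_16_bits(acc);	(end-around carry)
 *	return sum + acc;		(end-around carry)
 */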
	.align		32
	.globl		csum_partial
	.type		csum_partial,#function
	EXPORT_SYMBOL(csum_partial)
csum_partial:		/* %o0=buff, %o1=len, %o2=sum */
	prefetch	[%o0 + 0x000], #n_reads
	clr		%o4
	prefetch	[%o0 + 0x040], #n_reads
	brz,pn		%o1, csum_partial_finish
	 andcc		%o0, 0x3, %g0

	/* We "remember" whether the lowest bit in the address
	 * was set in %g7.  Because if it is, we have to swap
	 * upper and lower 8 bit fields of the sum we calculate.
	 */
	bne,pn		%icc, csum_partial_fix_alignment
	 andcc		%o0, 0x1, %g7
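
/* buff is now 32-bit aligned; any leading byte/halfword has already
 * been added into %o4.  %o3 gets the number of bytes handled by the
 * unrolled 64-byte loop (len & ~0x3f); the remainder stays in %o1.
 */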
csum_partial_post_align:
	prefetch	[%o0 + 0x080], #n_reads
	andncc		%o1, 0x3f, %o3
	prefetch	[%o0 + 0x0c0], #n_reads
	sub		%o1, %o3, %o1
	brz,pn		%o3, 2f
	 prefetch	[%o0 + 0x100], #n_reads

	/* So that we don't need to use the non-pairing
	 * add-with-carry instructions we accumulate 32-bit
	 * values into a 64-bit register.  At the end of the
	 * loop we fold it down to 32-bits and so on.
	 */
	prefetch	[%o0 + 0x140], #n_reads
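	/* Unrolled main loop: each pass sums sixteen 32-bit words
	 * (64 bytes), interleaving the lduw loads with plain 64-bit
	 * adds; carries simply accumulate in the upper half of %o4.
	 * %o3 counts down by 0x40 per iteration, and the final add
	 * rides in the branch delay slot.
	 */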
1:	lduw		[%o0 + 0x00], %o5
	lduw		[%o0 + 0x04], %g1
	lduw		[%o0 + 0x08], %g2
	add		%o4, %o5, %o4
	lduw		[%o0 + 0x0c], %g3
	add		%o4, %g1, %o4
	lduw		[%o0 + 0x10], %o5
	add		%o4, %g2, %o4
	lduw		[%o0 + 0x14], %g1
	add		%o4, %g3, %o4
	lduw		[%o0 + 0x18], %g2
	add		%o4, %o5, %o4
	lduw		[%o0 + 0x1c], %g3
	add		%o4, %g1, %o4
	lduw		[%o0 + 0x20], %o5
	add		%o4, %g2, %o4
	lduw		[%o0 + 0x24], %g1
	add		%o4, %g3, %o4
	lduw		[%o0 + 0x28], %g2
	add		%o4, %o5, %o4
	lduw		[%o0 + 0x2c], %g3
	add		%o4, %g1, %o4
	lduw		[%o0 + 0x30], %o5
	add		%o4, %g2, %o4
	lduw		[%o0 + 0x34], %g1
	add		%o4, %g3, %o4
	lduw		[%o0 + 0x38], %g2
	add		%o4, %o5, %o4
	lduw		[%o0 + 0x3c], %g3
	add		%o4, %g1, %o4
	prefetch	[%o0 + 0x180], #n_reads
	add		%o4, %g2, %o4
	subcc		%o3, 0x40, %o3
	add		%o0, 0x40, %o0
	bne,pt		%icc, 1b
	 add		%o4, %g3, %o4
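
	/* Fewer than 64 bytes remain.  Sum the remaining whole
	 * 32-bit words (len & 0x3c); after this at most 3 bytes are
	 * left for csum_partial_end_cruft.
	 */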
2:	and		%o1, 0x3c, %o3
	brz,pn		%o3, 2f
	 sub		%o1, %o3, %o1
1:	lduw		[%o0 + 0x00], %o5
	subcc		%o3, 0x4, %o3
	add		%o0, 0x4, %o0
	bne,pt		%icc, 1b
	 add		%o4, %o5, %o4
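
	/* Fold the 64-bit accumulator down to 16 bits.  Each fold is
	 * done twice because the first add can carry out of the low
	 * half; the second folds that carry back in and cannot
	 * overflow again.  Roughly, in C:
	 *
	 *	sum = (sum & 0xffffffff) + (sum >> 32);    64 --> 32
	 *	sum = (sum & 0xffff) + (sum >> 16);        32 --> 16
	 */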
2:
	/* fold 64-->32 */
	srlx		%o4, 32, %o5
	srl		%o4, 0, %o4
	add		%o4, %o5, %o4
	srlx		%o4, 32, %o5
	srl		%o4, 0, %o4
	add		%o4, %o5, %o4

	/* fold 32-->16 */
	sethi		%hi(0xffff0000), %g1
	srl		%o4, 16, %o5
	andn		%o4, %g1, %g2
	add		%o5, %g2, %o4
	srl		%o4, 16, %o5
	andn		%o4, %g1, %g2
	add		%o5, %g2, %o4
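
/* Handle the trailing sub-word bytes: at most 3 remain in %o1.
 * Add a final halfword if present, then a final lone byte.  SPARC
 * is big-endian, so that last byte is the high half of its 16-bit
 * word, hence the shift left by 8 before adding it.
 */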
csum_partial_end_cruft:
	/* %o4 has the 16-bit sum we have calculated so-far. */
	cmp		%o1, 2
	blu,pt		%icc, 1f
	 nop
	lduh		[%o0 + 0x00], %o5
	sub		%o1, 2, %o1
	add		%o0, 2, %o0
	add		%o4, %o5, %o4
1:	brz,pt		%o1, 1f
	 nop
	ldub		[%o0 + 0x00], %o5
	sub		%o1, 1, %o1
	add		%o0, 1, %o0
	sllx		%o5, 8, %o5
	add		%o4, %o5, %o4
1:
	/* fold 32-->16 */
	sethi		%hi(0xffff0000), %g1
	srl		%o4, 16, %o5
	andn		%o4, %g1, %g2
	add		%o5, %g2, %o4
	srl		%o4, 16, %o5
	andn		%o4, %g1, %g2
	add		%o5, %g2, %o4
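
	/* %g7 still holds the low bit of the original buff address.
	 * If the buffer started at an odd address, every 16-bit word
	 * was summed with its two bytes in swapped lanes; swapping
	 * the byte halves of the folded result recovers the checksum
	 * of the original byte stream (the usual RFC 1071
	 * byte-order property of the ones-complement sum).
	 */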
1:	brz,pt		%g7, 1f
	 nop

	/* We started with an odd byte, byte-swap the result. */
	srl		%o4, 8, %o5
	and		%o4, 0xff, %g1
	sll		%g1, 8, %g1
	or		%o5, %g1, %o4
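
	/* Fold the 16-bit result into the caller's 32-bit sum with an
	 * end-around carry (addc picks up the carry set by addcc) and
	 * return it zero-extended via the srl in the retl delay slot.
	 */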
1:	addcc		%o2, %o4, %o2
	addc		%g0, %o2, %o2

csum_partial_finish:
	retl
	 srl		%o2, 0, %o0