csumpartial.S

/*
 *  linux/arch/arm/lib/csumpartial.S
 *
 *  Copyright (C) 1995-1998 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

                .text

/*
 * Function: __u32 csum_partial(const char *src, int len, __u32 sum)
 * Params  : r0 = buffer, r1 = len, r2 = checksum
 * Returns : r0 = new checksum
 */
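
/*
 * For orientation, a minimal C sketch of what this routine computes.  This
 * is an illustration only (not the kernel's generic lib/checksum.c code);
 * csum_add32 and csum_partial_sketch are illustrative names, and the sketch
 * assumes a little-endian, 4-byte-aligned buffer and kernel u32/u16 types.
 * The assembly below additionally copes with misaligned buffers (by rotating
 * the checksum) and with pre-ARMv4 cores that lack ldrh.  The raw 32-bit
 * result may differ from the assembly's, but both fold to the same 16-bit
 * checksum via csum_fold().
 *
 *      static inline u32 csum_add32(u32 a, u32 b)
 *      {
 *              u32 t = a + b;
 *              return t + (t < a);             // fold the carry back in
 *      }
 *
 *      u32 csum_partial_sketch(const unsigned char *buf, int len, u32 sum)
 *      {
 *              for (; len >= 4; buf += 4, len -= 4)
 *                      sum = csum_add32(sum, *(const u32 *)buf);  // whole words
 *              if (len >= 2) {
 *                      sum = csum_add32(sum, *(const u16 *)buf);  // trailing halfword
 *                      buf += 2;
 *                      len -= 2;
 *              }
 *              if (len)
 *                      sum = csum_add32(sum, *buf);  // last odd byte, low lane (LE)
 *              return sum;     // caller folds to 16 bits with csum_fold()
 *      }
 */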

buf     .req    r0
len     .req    r1
sum     .req    r2
td0     .req    r3
td1     .req    r4              @ save before use
td2     .req    r5              @ save before use
td3     .req    lr
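
/*
 * td1/td2 live in the callee-saved registers r4/r5, which is why the main
 * 32-byte loop below stacks and restores them; td3 uses lr, which is saved
 * by the stmfd at the entry point.
 */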

.Lzero:         mov     r0, sum
                add     sp, sp, #4
                ldr     pc, [sp], #4

                /*
                 * Handle 0 to 7 bytes, with any alignment of source and
                 * destination pointers.  Note that when we get here, C = 0
                 */
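                /*
                 * If the buffer starts on an odd address, the byte lanes of
                 * every 16-bit word are swapped relative to an even start.
                 * Rotating the incoming sum right by 8 here (and rotating the
                 * result back in .Ldone) compensates, so the leading byte can
                 * simply be accumulated into lane 1 via put_byte_1 (defined
                 * in asm/assembler.h).
                 */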
.Lless8:        teq     len, #0                 @ check for zero count
                beq     .Lzero

                /* we must have at least one byte. */
                tst     buf, #1                 @ odd address?
                movne   sum, sum, ror #8
                ldrneb  td0, [buf], #1
                subne   len, len, #1
                adcnes  sum, sum, td0, put_byte_1

.Lless4:        tst     len, #6
                beq     .Lless8_byte

                /* we are now half-word aligned */
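                /*
                 * Sum the remaining data a halfword at a time.  Pre-ARMv4
                 * cores have no ldrh, so the halfword is assembled from two
                 * byte loads, with the orr order chosen by endianness.
                 */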
.Lless8_wordlp:
#if __LINUX_ARM_ARCH__ >= 4
                ldrh    td0, [buf], #2
                sub     len, len, #2
#else
                ldrb    td0, [buf], #1
                ldrb    td3, [buf], #1
                sub     len, len, #2
#ifndef __ARMEB__
                orr     td0, td0, td3, lsl #8
#else
                orr     td0, td3, td0, lsl #8
#endif
#endif
                adcs    sum, sum, td0
                tst     len, #6
                bne     .Lless8_wordlp

.Lless8_byte:   tst     len, #1                 @ odd number of bytes
                ldrneb  td0, [buf], #1          @ include last byte
                adcnes  sum, sum, td0, put_byte_0 @ update checksum
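
                /*
                 * All data accumulated.  The original buffer pointer was
                 * pushed at entry; pop it and, if it was odd, rotate the
                 * checksum back by 8 bits to undo the rotation applied above.
                 */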
.Ldone:         adc     r0, sum, #0             @ collect up the last carry
                ldr     td0, [sp], #4
                tst     td0, #1                 @ check buffer alignment
                movne   r0, r0, ror #8          @ rotate checksum by 8 bits
                ldr     pc, [sp], #4            @ return
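
                /*
                 * Word-align the buffer by consuming up to three leading
                 * bytes (one to reach an even address, then a halfword),
                 * folding them into the checksum on the way.  Called with
                 * blne from the entry path below; returns via lr.
                 */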
.Lnot_aligned:  tst     buf, #1                 @ odd address
                ldrneb  td0, [buf], #1          @ make even
                subne   len, len, #1
                adcnes  sum, sum, td0, put_byte_1 @ update checksum

                tst     buf, #2                 @ 32-bit aligned?
#if __LINUX_ARM_ARCH__ >= 4
                ldrneh  td0, [buf], #2          @ make 32-bit aligned
                subne   len, len, #2
#else
                ldrneb  td0, [buf], #1
                ldrneb  ip, [buf], #1
                subne   len, len, #2
#ifndef __ARMEB__
                orrne   td0, td0, ip, lsl #8
#else
                orrne   td0, ip, td0, lsl #8
#endif
#endif
                adcnes  sum, sum, td0           @ update checksum
                mov     pc, lr
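
/*
 * Entry point.  The original buffer pointer and the return address are
 * stacked so that .Ldone can test the original alignment and return.
 */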
ENTRY(csum_partial)
                stmfd   sp!, {buf, lr}
                cmp     len, #8                 @ Ensure that we have at least
                blo     .Lless8                 @ 8 bytes to copy.

                tst     buf, #1
                movne   sum, sum, ror #8

                adds    sum, sum, #0            @ C = 0
                tst     buf, #3                 @ Test buffer alignment
                blne    .Lnot_aligned           @ align buffer, return here
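
                /*
                 * Main loop: 32 bytes per iteration, carries folded back in
                 * through the adcs chain.  td1/td2 (r4/r5) are callee-saved,
                 * so stack them around the loop.  The loop counter uses
                 * sub/teq rather than subs so the carry flag from the adcs
                 * chain is not clobbered.
                 */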
1:              bics    ip, len, #31
                beq     3f

                stmfd   sp!, {r4 - r5}
2:              ldmia   buf!, {td0, td1, td2, td3}
                adcs    sum, sum, td0
                adcs    sum, sum, td1
                adcs    sum, sum, td2
                adcs    sum, sum, td3
                ldmia   buf!, {td0, td1, td2, td3}
                adcs    sum, sum, td0
                adcs    sum, sum, td1
                adcs    sum, sum, td2
                adcs    sum, sum, td3
                sub     ip, ip, #32
                teq     ip, #0
                bne     2b
                ldmfd   sp!, {r4 - r5}
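
                /*
                 * Fewer than 32 bytes remain: add any whole words, then fall
                 * through to .Lless4 for the trailing halfword/byte.
                 */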
3:              tst     len, #0x1c              @ should not change C
                beq     .Lless4

4:              ldr     td0, [buf], #4
                sub     len, len, #4
                adcs    sum, sum, td0
                tst     len, #0x1c
                bne     4b
                b       .Lless4