/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/linkage.h>

#ifdef __LITTLE_ENDIAN__
#define WORD2	r2
#define SHIFT	r3
#else /* BIG ENDIAN */
#define WORD2	r3
#define SHIFT	r2
#endif
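
/*
 * WORD2 is loaded with the second word of each 8-byte pair; SHIFT
 * carries a length-derived value that is later scaled into a bit
 * count for the final partial-word compare.  Which physical register
 * plays which role swaps with endianness.
 */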
ENTRY_CFI(memcmp)
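	/*
	 * (r0 | r1) << 30 is non-zero iff either pointer is not 32-bit
	 * aligned, and then compares unsigned-higher than any plausible
	 * length, so brls sends unaligned (and zero-length) buffers to
	 * the bytewise loop.
	 */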
	or	r12,r0,r1
	asl_s	r12,r12,30
	sub	r3,r2,1
	brls	r2,r12,.Lbytewise
	ld	r4,[r0,0]
	ld	r5,[r1,0]
	lsr.f	lp_count,r3,3
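	/*
	 * lp_count = (n - 1) / 8: the word loop handles eight bytes per
	 * iteration.  lpne skips the loop body entirely when that count
	 * is zero; the carry bit (bit 2 of n - 1) is consumed after the
	 * loop to size the tail.
	 */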
#ifdef CONFIG_ISA_ARCV2
	/* In ARCv2 a branch can't be the last instruction in a zero
	 * overhead loop.
	 * So we move the branch to the start of the loop, duplicate it
	 * after the end, and set up r12 so that the branch isn't taken
	 * initially.
	 */
	mov_s	r12,WORD2
	lpne	.Loop_end
	brne	WORD2,r12,.Lodd
	ld	WORD2,[r0,4]
#else
	lpne	.Loop_end
	ld_s	WORD2,[r0,4]
#endif
	ld_s	r12,[r1,4]
	brne	r4,r5,.Leven
	ld.a	r4,[r0,8]
	ld.a	r5,[r1,8]
#ifdef CONFIG_ISA_ARCV2
.Loop_end:
	brne	WORD2,r12,.Lodd
#else
	brne	WORD2,r12,.Lodd
.Loop_end:
#endif
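	/*
	 * 1..8 tail bytes remain.  SHIFT << 3 turns the residual length
	 * into a bit position; if carry is clear the tail fits in the
	 * word pair already in r4/r5, so go straight to .Last_cmp,
	 * otherwise check this pair and load one more word from each
	 * buffer.
	 */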
	asl_s	SHIFT,SHIFT,3
	bhs_s	.Last_cmp
	brne	r4,r5,.Leven
	ld	r4,[r0,4]
	ld	r5,[r1,4]
#ifdef __LITTLE_ENDIAN__
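	/*
	 * Little-endian: the first differing byte is the least
	 * significant differing byte.  xor exposes the difference bits,
	 * bset plants a guard bit in the last valid byte so stale
	 * higher bytes can never be selected, (x - 1) & ~x isolates the
	 * bits below the lowest set bit, norm converts that run into a
	 * shift count, and "and ...,24" rounds it to a byte boundary.
	 * The differing bytes are shifted to the top, then lsr'd by one
	 * so the final subtraction cannot overflow.
	 */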
	nop_s
	; one more load latency cycle
.Last_cmp:
	xor	r0,r4,r5
	bset	r0,r0,SHIFT
	sub_s	r1,r0,1
	bic_s	r1,r1,r0
	norm	r1,r1
	b.d	.Leven_cmp
	and	r1,r1,24
.Leven:
	xor	r0,r4,r5
	sub_s	r1,r0,1
	bic_s	r1,r1,r0
	norm	r1,r1
	; slow track insn
	and	r1,r1,24
.Leven_cmp:
	asl	r2,r4,r1
	asl	r12,r5,r1
	lsr_s	r2,r2,1
	lsr_s	r12,r12,1
	j_s.d	[blink]
	sub	r0,r2,r12
	.balign	4
.Lodd:
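	/* Mismatch seen in the second word of a pair (WORD2 vs r12). */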
	xor	r0,WORD2,r12
	sub_s	r1,r0,1
	bic_s	r1,r1,r0
	norm	r1,r1
	; slow track insn
	and	r1,r1,24
	asl_s	r2,r2,r1
	asl_s	r12,r12,r1
	lsr_s	r2,r2,1
	lsr_s	r12,r12,1
	j_s.d	[blink]
	sub	r0,r2,r12
#else /* BIG ENDIAN */
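	/*
	 * Big-endian: the first differing byte is already the most
	 * significant, so a flag-setting subtract orders the words
	 * directly; the partial-word case just shifts the invalid low
	 * bytes out first.  A non-zero result is normalised to 1, with
	 * bit 31 set on carry to make it negative.
	 */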
.Last_cmp:
	neg_s	SHIFT,SHIFT
	lsr	r4,r4,SHIFT
	lsr	r5,r5,SHIFT
	; slow track insn
.Leven:
	sub.f	r0,r4,r5
	mov.ne	r0,1
	j_s.d	[blink]
	bset.cs	r0,r0,31
.Lodd:
	cmp_s	WORD2,r12
	mov_s	r0,1
	j_s.d	[blink]
	bset.cs	r0,r0,31
#endif /* ENDIAN */
	.balign	4
.Lbytewise:
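	/*
	 * Unaligned or zero-length buffers: compare two bytes per
	 * zero-overhead-loop iteration, lp_count = (n - 1) / 2, with
	 * the carry bit (bit 0 of n - 1) recording whether n is even.
	 */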
	breq	r2,0,.Lnil
	ldb	r4,[r0,0]
	ldb	r5,[r1,0]
	lsr.f	lp_count,r3
#ifdef CONFIG_ISA_ARCV2
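	/* Same ARCv2 zero-overhead-loop workaround as in the word loop:
	 * prime r12 so the duplicated branch falls through on entry.
	 */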
	mov	r12,r3
	lpne	.Lbyte_end
	brne	r3,r12,.Lbyte_odd
#else
	lpne	.Lbyte_end
#endif
	ldb_s	r3,[r0,1]
	ldb	r12,[r1,1]
	brne	r4,r5,.Lbyte_even
	ldb.a	r4,[r0,2]
	ldb.a	r5,[r1,2]
#ifdef CONFIG_ISA_ARCV2
.Lbyte_end:
	brne	r3,r12,.Lbyte_odd
#else
	brne	r3,r12,.Lbyte_odd
.Lbyte_end:
#endif
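	/*
	 * One or two tail bytes remain.  Carry clear (n odd) means only
	 * the r4/r5 pair is pending; otherwise check it and fetch the
	 * final odd-offset pair into r3/r12.
	 */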
	bcc	.Lbyte_even
	brne	r4,r5,.Lbyte_even
	ldb_s	r3,[r0,1]
	ldb_s	r12,[r1,1]
.Lbyte_odd:
	j_s.d	[blink]
	sub	r0,r3,r12
.Lbyte_even:
	j_s.d	[blink]
	sub	r0,r4,r5
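	/* n == 0: the buffers are trivially equal. */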
.Lnil:
	j_s.d	[blink]
	mov	r0,0
END_CFI(memcmp)