/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/memset.S
 *
 *  Copyright (C) 1995-2000 Russell King
 *
 *  ASM optimised string functions
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
        .text
        .align  5
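
/*
 * void *memset(void *s, int c, size_t n)
 * On entry (AAPCS): r0 = s, r1 = c, r2 = n in bytes.  memset() must
 * return s, so the running pointer is kept in ip and r0 is left
 * untouched.  mmioset provides the same routine under the name used
 * by the memset_io() helper.
 */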
ENTRY(mmioset)
ENTRY(memset)
UNWIND( .fnstart )
        and     r1, r1, #255            @ cast to unsigned char
        ands    r3, r0, #3              @ 1 unaligned?
        mov     ip, r0                  @ preserve r0 as return value
        bne     6f                      @ 1
/*
 * we know that the pointer in ip is aligned to a word boundary.
 */
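/*
 * Replicate the fill byte into all four byte lanes of r1, then copy
 * it into r3 so the store-multiple instructions below write the
 * pattern with every register.
 */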
1:      orr     r1, r1, r1, lsl #8
        orr     r1, r1, r1, lsl #16
        mov     r3, r1
7:      cmp     r2, #16
        blt     4f

#if ! CALGN(1)+0

/*
 * We need 2 extra registers for this loop - use r8 and the LR
 */
        stmfd   sp!, {r8, lr}
UNWIND( .fnend )
UNWIND( .fnstart )
UNWIND( .save {r8, lr} )
        mov     r8, r1
        mov     lr, r3
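
/*
 * Store 64 bytes per iteration.  subs drives the ge conditions:
 * once r2 goes negative the four stores are skipped and the loop
 * falls out; ldmfdeq returns directly when the count hit exactly 0.
 */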
2:      subs    r2, r2, #64
        stmiage ip!, {r1, r3, r8, lr}   @ 64 bytes at a time.
        stmiage ip!, {r1, r3, r8, lr}
        stmiage ip!, {r1, r3, r8, lr}
        stmiage ip!, {r1, r3, r8, lr}
        bgt     2b
        ldmfdeq sp!, {r8, pc}           @ Now <64 bytes to go.
/*
 * No need to correct the count; we're only testing bits from now on
 */
        tst     r2, #32
        stmiane ip!, {r1, r3, r8, lr}
        stmiane ip!, {r1, r3, r8, lr}
        tst     r2, #16
        stmiane ip!, {r1, r3, r8, lr}
        ldmfd   sp!, {r8, lr}
UNWIND( .fnend )
#else

/*
 * This version aligns the destination pointer in order to write
 * whole cache lines at once.
 */
        stmfd   sp!, {r4-r8, lr}
UNWIND( .fnend )
UNWIND( .fnstart )
UNWIND( .save {r4-r8, lr} )
        mov     r4, r1
        mov     r5, r3
        mov     r6, r1
        mov     r7, r3
        mov     r8, r1
        mov     lr, r3
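
/*
 * Skip straight to the bulk loop at 3: for writes of 96 bytes or
 * less, or when ip is already on a 32-byte boundary (tstgt then
 * sets the Z flag, satisfying the ble).
 */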
        cmp     r2, #96
        tstgt   ip, #31
        ble     3f

        and     r8, ip, #31
        rsb     r8, r8, #32
        sub     r2, r2, r8
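/*
 * r8 holds the byte count needed to reach a 32-byte boundary (a
 * multiple of 4, since ip is already word aligned).  Shifting it
 * left by 28 moves bit 4 into C, bit 3 into N and bit 2 into bit 30
 * of the result, so the conditional stores below write 16, 8 and 4
 * bytes respectively.
 */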
        movs    r8, r8, lsl #(32 - 4)
        stmiacs ip!, {r4, r5, r6, r7}
        stmiami ip!, {r4, r5}
        tst     r8, #(1 << 30)
        mov     r8, r1
        strne   r1, [ip], #4

3:      subs    r2, r2, #64
        stmiage ip!, {r1, r3-r8, lr}
        stmiage ip!, {r1, r3-r8, lr}
        bgt     3b
        ldmfdeq sp!, {r4-r8, pc}

        tst     r2, #32
        stmiane ip!, {r1, r3-r8, lr}
        tst     r2, #16
        stmiane ip!, {r4-r7}
        ldmfd   sp!, {r4-r8, lr}
UNWIND( .fnend )

#endif
UNWIND( .fnstart )
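/*
 * At most 15 bytes remain here; bits 3..0 of r2 say exactly how many.
 */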
4:      tst     r2, #8
        stmiane ip!, {r1, r3}
        tst     r2, #4
        strne   r1, [ip], #4
/*
 * When we get here, we've got less than 4 bytes to set.  We
 * may have an unaligned pointer as well.
 */
5:      tst     r2, #2
        strbne  r1, [ip], #1
        strbne  r1, [ip], #1
        tst     r2, #1
        strbne  r1, [ip], #1
        ret     lr
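
/*
 * The destination is not word aligned: r3 = r0 & 3.  If fewer than
 * 4 bytes remain, finish with byte stores at 5b; otherwise store
 * 4 - r3 single bytes to reach a word boundary and rejoin the
 * aligned path at 1b.
 */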
6:      subs    r2, r2, #4              @ 1 do we have enough
        blt     5b                      @ 1 bytes to align with?
        cmp     r3, #2                  @ 1
        strblt  r1, [ip], #1            @ 1
        strble  r1, [ip], #1            @ 1
        strb    r1, [ip], #1            @ 1
        add     r2, r2, r3              @ 1 (r2 = r2 - (4 - r3))
        b       1b
UNWIND( .fnend )
ENDPROC(memset)
ENDPROC(mmioset)
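
/*
 * void *__memset32(uint32_t *p, uint32_t v, size_t n)
 * void *__memset64(uint64_t *p, uint32_t low, size_t n, uint32_t high)
 * n is a byte count; the memset32()/memset64() wrappers pre-split
 * the pattern so the low word arrives in r1 and the high word in r3,
 * which is exactly the state the word loop at 7b expects.
 */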
ENTRY(__memset32)
UNWIND( .fnstart )
        mov     r3, r1                  @ copy r1 to r3 and fall into memset64
UNWIND( .fnend )
ENDPROC(__memset32)
ENTRY(__memset64)
UNWIND( .fnstart )
        mov     ip, r0                  @ preserve r0 as return value
        b       7b                      @ jump into the middle of memset
UNWIND( .fnend )
ENDPROC(__memset64)