memset.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2013 Linaro.
 *
 * This code is based on glibc cortex strings work originally authored by Linaro
 * and re-licensed under GPLv2 for the Linux kernel. The original code can
 * be found @
 *
 * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
 * files/head:/src/aarch64/
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cache.h>

/*
 * Fill in the buffer with character c (alignment handled by the hardware)
 *
 * Parameters:
 *	x0 - buf
 *	x1 - c
 *	x2 - n
 * Returns:
 *	x0 - buf
 */

dstin		.req	x0
val		.req	w1
count		.req	x2
tmp1		.req	x3
tmp1w		.req	w3
tmp2		.req	x4
tmp2w		.req	w4
zva_len_x	.req	x5
zva_len		.req	w5
zva_bits_x	.req	x6
A_l		.req	x7
A_lw		.req	w7
dst		.req	x8
tmp3w		.req	w9
tmp3		.req	x9

SYM_FUNC_START_ALIAS(__memset)
SYM_FUNC_START_WEAK_PI(memset)
	mov	dst, dstin	/* Preserve return value. */
	and	A_lw, val, #255
	orr	A_lw, A_lw, A_lw, lsl #8
	orr	A_lw, A_lw, A_lw, lsl #16
	orr	A_l, A_l, A_l, lsl #32
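	/*
	 * A_lw now holds the fill byte replicated into all four bytes of
	 * w7, and the final orr copies the low 32 bits into the high half,
	 * so A_l is the byte repeated across all eight bytes (e.g.
	 * 0xabababababababab for c = 0xab).
	 */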

	cmp	count, #15
	b.hi	.Lover16_proc
	/* All of the stores below may be unaligned. */
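	/*
	 * count is at most 15 here: bits 3, 2, 1 and 0 of count select an
	 * 8-, 4-, 2- and 1-byte store respectively, which together cover
	 * any length from 0 to 15.
	 */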
	tbz	count, #3, 1f
	str	A_l, [dst], #8
1:
	tbz	count, #2, 2f
	str	A_lw, [dst], #4
2:
	tbz	count, #1, 3f
	strh	A_lw, [dst], #2
3:
	tbz	count, #0, 4f
	strb	A_lw, [dst]
4:
	ret

.Lover16_proc:
	/* Check whether the start address is 16-byte aligned. */
	neg	tmp2, dst
	ands	tmp2, tmp2, #15
	b.eq	.Laligned
	/*
	 * count is at least 16, so we can use an stp to store the first
	 * 16 bytes and then advance dst to the next 16-byte boundary.
	 */
	stp	A_l, A_l, [dst]	/* Possibly unaligned store. */
	/* Make dst 16-byte aligned. */
	sub	count, count, tmp2
	add	dst, dst, tmp2
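	/*
	 * The head stp and the aligned stores that follow overlap by
	 * (16 - tmp2) bytes; writing those bytes twice is harmless since
	 * both stores use the same fill pattern.
	 */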

.Laligned:
	cbz	A_l, .Lzero_mem
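	/*
	 * A fill value of zero takes the DC ZVA path below, which can
	 * clear whole blocks at a time.
	 */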

.Ltail_maybe_long:
	cmp	count, #64
	b.ge	.Lnot_short
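	/*
	 * .Ltail63 stores the remaining tail (1..63 bytes, held in the low
	 * six bits of count): count & 0x30 selects how many 16-byte stp
	 * instructions to run by branching into the fall-through chain
	 * (0x30: three, 0x20: two, 0x10: one), and labels 3:/4: finish the
	 * final 0..15 bytes.
	 */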
.Ltail63:
	ands	tmp1, count, #0x30
	b.eq	3f
	cmp	tmp1w, #0x20
	b.eq	1f
	b.lt	2f
	stp	A_l, A_l, [dst], #16
1:
	stp	A_l, A_l, [dst], #16
2:
	stp	A_l, A_l, [dst], #16
	/*
	 * The remaining length is less than 16; use an stp to write the
	 * last 16 bytes. Some bytes are written twice and the access may
	 * be unaligned.
	 */
3:
	ands	count, count, #15
	cbz	count, 4f
	add	dst, dst, count
	stp	A_l, A_l, [dst, #-16]	/* Repeat some/all of last store. */
4:
	ret

	/*
	 * Critical loop. Start at a new cache line boundary. Assuming
	 * 64 bytes per line, this ensures the entire loop is in one line.
	 */
	.p2align	L1_CACHE_SHIFT
.Lnot_short:
	sub	dst, dst, #16	/* Pre-bias. */
	sub	count, count, #64
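	/*
	 * dst is pre-biased by -16 so the four stp instructions below can
	 * use immediate offsets #16..#64, with the last one writing back
	 * dst += 64 per iteration; count is biased by -64 so the subs/b.ge
	 * pair loops while at least 64 bytes remain.
	 */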
1:
	stp	A_l, A_l, [dst, #16]
	stp	A_l, A_l, [dst, #32]
	stp	A_l, A_l, [dst, #48]
	stp	A_l, A_l, [dst, #64]!
	subs	count, count, #64
	b.ge	1b
	tst	count, #0x3f
	add	dst, dst, #16
	b.ne	.Ltail63
.Lexitfunc:
	ret

	/*
	 * For zeroing memory, check to see if we can use the ZVA feature to
	 * zero entire 'cache' lines.
	 */
.Lzero_mem:
	cmp	count, #63
	b.le	.Ltail63
	/*
	 * For zeroing small amounts of memory, it's not worth setting up
	 * the line-clear code.
	 */
	cmp	count, #128
	b.lt	.Lnot_short	/* From here on, count is at least 128 bytes. */

	mrs	tmp1, dczid_el0
	tbnz	tmp1, #4, .Lnot_short
	mov	tmp3w, #4
	and	zva_len, tmp1w, #15	/* Safety: other bits reserved. */
	lsl	zva_len, tmp3w, zva_len
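	/*
	 * DCZID_EL0[3:0] is log2 of the DC ZVA block size in words, and
	 * bit 4 (DZP) is set when DC ZVA is prohibited (tested by the
	 * tbnz above), so zva_len = 4 << DCZID_EL0[3:0] is the block size
	 * in bytes.
	 */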

	ands	tmp3w, zva_len, #63
	/*
	 * Make sure zva_len is not less than 64: using ZVA with a block
	 * size smaller than 64 bytes is not worthwhile.
	 */
	b.ne	.Lnot_short
.Lzero_by_line:
	/*
	 * Compute how far we need to go to become suitably aligned. We're
	 * already at quad-word alignment.
	 */
	cmp	count, zva_len_x
	b.lt	.Lnot_short		/* Not enough to reach alignment. */
	sub	zva_bits_x, zva_len_x, #1
	neg	tmp2, dst
	ands	tmp2, tmp2, zva_bits_x
	b.eq	2f			/* Already aligned. */
	/* Not aligned, check that there's enough to copy after alignment. */
	sub	tmp1, count, tmp2
	/*
	 * Make sure the length remaining after alignment is at least 64
	 * bytes and at least one ZVA block, so the loop at 2f cannot run
	 * past the end of the buffer.
	 */
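	/*
	 * The cmp/ccmp pair implements "tmp1 >= 64 && tmp1 >= zva_len": if
	 * the first compare finds tmp1 >= 64, ccmp compares tmp1 with
	 * zva_len_x; otherwise it forces NZCV to 0b1000 (N set) so that
	 * b.lt falls back to .Lnot_short.
	 */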
	cmp	tmp1, #64
	ccmp	tmp1, zva_len_x, #8, ge	/* NZCV=0b1000 */
	b.lt	.Lnot_short
	/*
	 * We know that there's at least 64 bytes to zero and that it's safe
	 * to overrun by 64 bytes.
	 */
	mov	count, tmp1
1:
	stp	A_l, A_l, [dst]
	stp	A_l, A_l, [dst, #16]
	stp	A_l, A_l, [dst, #32]
	subs	tmp2, tmp2, #64
	stp	A_l, A_l, [dst, #48]
	add	dst, dst, #64
	b.ge	1b

	/* We've overrun a bit, so adjust dst downwards. */
	add	dst, dst, tmp2
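	/*
	 * dst now sits exactly on a ZVA block boundary; any bytes the loop
	 * above stored past that boundary are simply zeroed again by the
	 * first DC ZVA below.
	 */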
2:
	sub	count, count, zva_len_x
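	/*
	 * DC ZVA zeroes the whole ZVA block containing the given address;
	 * dst is block-aligned here, so each iteration clears exactly
	 * zva_len bytes. count was pre-decremented by one block above, so
	 * the subs/b.ge loop runs once per full block; afterwards the ands
	 * recovers the sub-block remainder (zva_len is a power of two),
	 * which is finished with ordinary stores via .Ltail_maybe_long.
	 */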
3:
	dc	zva, dst
	add	dst, dst, zva_len_x
	subs	count, count, zva_len_x
	b.ge	3b
	ands	count, count, zva_bits_x
	b.ne	.Ltail_maybe_long
	ret
SYM_FUNC_END_PI(memset)
EXPORT_SYMBOL(memset)
SYM_FUNC_END_ALIAS(__memset)
EXPORT_SYMBOL(__memset)