string.S

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) Paul Mackerras 1997.
 *
 * NOTE: this code runs in 32 bit mode and is packaged as ELF32.
 */

#include "ppc_asm.h"

	.text
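
/*
 * strcpy(dst, src): copy the NUL-terminated string at src (r4) to
 * dst (r3), including the terminator, one byte at a time.
 * Returns dst (r3 is left untouched).
 */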
	.globl	strcpy
strcpy:
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r5)
	bne	1b
	blr
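
/*
 * strncpy(dst, src, n): copy at most n (r5) bytes from src (r4) to
 * dst (r3), stopping once a terminating NUL has been copied.
 * Unlike ISO C strncpy, the remainder of dst is not zero-padded.
 * Returns dst.
 */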
	.globl	strncpy
strncpy:
	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r6,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r6)
	bdnzf	2,1b		/* dec ctr, branch if ctr != 0 && !cr0.eq */
	blr
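
/*
 * strcat(dst, src): find the end of dst (r3), then append the
 * NUL-terminated string at src (r4), terminator included.
 * Returns dst.
 */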
	.globl	strcat
strcat:
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r5)
	cmpwi	0,r0,0
	bne	1b
	addi	r5,r5,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r5)
	bne	1b
	blr
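
/*
 * strchr(s, c): return a pointer to the first occurrence of the
 * character c (r4) in the NUL-terminated string s (r3), or NULL
 * if it does not occur.
 */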
	.globl	strchr
strchr:
	addi	r3,r3,-1
1:	lbzu	r0,1(r3)
	cmpw	0,r0,r4
	beqlr
	cmpwi	0,r0,0
	bne	1b
	li	r3,0
	blr
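
/*
 * strcmp(s1, s2): compare the strings at r3 and r4 byte by byte.
 * Returns 0 if they are equal, otherwise the difference between the
 * first pair of bytes that differ.
 */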
	.globl	strcmp
strcmp:
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r3,1(r5)
	cmpwi	1,r3,0
	lbzu	r0,1(r4)
	subf.	r3,r0,r3
	beqlr	1
	beq	1b
	blr
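
/*
 * strncmp(s1, s2, n): as strcmp, but compare at most n (r5) bytes.
 * The first byte is compared before the count is checked, so the
 * caller is expected to pass n > 0.
 */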
	.globl	strncmp
strncmp:
	mtctr	r5
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r3,1(r5)
	cmpwi	1,r3,0
	lbzu	r0,1(r4)
	subf.	r3,r0,r3
	beqlr	1
	bdnzt	eq,1b
	blr
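
/*
 * strlen(s): return the length of the NUL-terminated string at r3,
 * not counting the terminator.
 */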
	.globl	strlen
strlen:
	addi	r4,r3,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	bne	1b
	subf	r3,r3,r4
	blr
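
/*
 * memset(s, c, n): fill n (r5) bytes at s (r3) with the byte c (r4).
 * The fill byte is replicated across a word so the aligned bulk of
 * the buffer can be stored a word at a time; leading and trailing odd
 * bytes are stored individually.  Returns s.
 */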
	.globl	memset
memset:
	rlwimi	r4,r4,8,16,23
	rlwimi	r4,r4,16,0,15
	addi	r6,r3,-4
	cmplwi	0,r5,4
	blt	7f
	stwu	r4,4(r6)
	beqlr
	andi.	r0,r6,3
	add	r5,r0,r5
	subf	r6,r0,r6
	rlwinm	r0,r5,32-2,2,31
	mtctr	r0
	bdz	6f
1:	stwu	r4,4(r6)
	bdnz	1b
6:	andi.	r5,r5,3
7:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r6,r6,3
8:	stbu	r4,1(r6)
	bdnz	8b
	blr
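
/*
 * memmove(dst, src, n): copy n (r5) bytes from src (r4) to dst (r3),
 * coping with overlapping regions by copying backwards when dst is
 * above src.  Returns dst.
 */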
	.globl	memmove
memmove:
	cmplw	0,r3,r4
	bgt	backwards_memcpy
	/* fall through */
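
/*
 * memcpy(dst, src, n): forward copy of n (r5) bytes from src (r4) to
 * dst (r3).  When both pointers can be brought to word alignment the
 * bulk is moved two words per iteration; otherwise it falls back to a
 * byte-by-byte loop.  Returns dst.
 */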
	.globl	memcpy
memcpy:
	rlwinm.	r7,r5,32-3,3,31		/* r7 = r5 >> 3 */
	addi	r6,r3,-4
	addi	r4,r4,-4
	beq	3f			/* if less than 8 bytes to do */
	andi.	r0,r6,3			/* get dest word aligned */
	mtctr	r7
	bne	5f
	andi.	r0,r4,3			/* check src word aligned too */
	bne	3f
1:	lwz	r7,4(r4)
	lwzu	r8,8(r4)
	stw	r7,4(r6)
	stwu	r8,8(r6)
	bdnz	1b
	andi.	r5,r5,7
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,4(r4)
	addi	r5,r5,-4
	stwu	r0,4(r6)
3:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r4,r4,3
	addi	r6,r6,3
4:	lbzu	r0,1(r4)
	stbu	r0,1(r6)
	bdnz	4b
	blr
5:	subfic	r0,r0,4
	cmpw	cr1,r0,r5
	add	r7,r0,r4
	andi.	r7,r7,3			/* will source be word-aligned too? */
	ble	cr1,3b
	bne	3b			/* do byte-by-byte if not */
	mtctr	r0
6:	lbz	r7,4(r4)
	addi	r4,r4,1
	stb	r7,4(r6)
	addi	r6,r6,1
	bdnz	6b
	subf	r5,r0,r5
	rlwinm.	r7,r5,32-3,3,31
	beq	2b
	mtctr	r7
	b	1b
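
/*
 * backwards_memcpy(dst, src, n): as memcpy, but copy the n (r5) bytes
 * from the end of the buffers towards the start, so overlapping
 * regions with dst above src are handled correctly.
 */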
	.globl	backwards_memcpy
backwards_memcpy:
	rlwinm.	r7,r5,32-3,3,31		/* r7 = r5 >> 3 */
	add	r6,r3,r5
	add	r4,r4,r5
	beq	3f
	andi.	r0,r6,3
	mtctr	r7
	bne	5f
	andi.	r0,r4,3
	bne	3f
1:	lwz	r7,-4(r4)
	lwzu	r8,-8(r4)
	stw	r7,-4(r6)
	stwu	r8,-8(r6)
	bdnz	1b
	andi.	r5,r5,7
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,-4(r4)
	subi	r5,r5,4
	stwu	r0,-4(r6)
3:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
4:	lbzu	r0,-1(r4)
	stbu	r0,-1(r6)
	bdnz	4b
	blr
5:	cmpw	cr1,r0,r5
	subf	r7,r0,r4
	andi.	r7,r7,3
	ble	cr1,3b
	bne	3b
	mtctr	r0
6:	lbzu	r7,-1(r4)
	stbu	r7,-1(r6)
	bdnz	6b
	subf	r5,r0,r5
	rlwinm.	r7,r5,32-3,3,31
	beq	2b
	mtctr	r7
	b	1b
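
/*
 * memchr(s, c, n): scan the first n (r5) bytes at s (r3) for the byte
 * c (r4).  Returns a pointer to the first match, or NULL if there is
 * no match.  A count of zero returns without scanning.
 */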
	.globl	memchr
memchr:
	cmpwi	0,r5,0
	blelr
	mtctr	r5
	addi	r3,r3,-1
1:	lbzu	r0,1(r3)
	cmpw	r0,r4
	beqlr
	bdnz	1b
	li	r3,0
	blr
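
/*
 * memcmp(s1, s2, n): compare n (r5) bytes at r3 and r4.  Returns 0 if
 * they are equal (or n <= 0), otherwise the difference between the
 * first pair of bytes that differ.
 */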
	.globl	memcmp
memcmp:
	cmpwi	0,r5,0
	ble	2f
	mtctr	r5
	addi	r6,r3,-1
	addi	r4,r4,-1
1:	lbzu	r3,1(r6)
	lbzu	r0,1(r4)
	subf.	r3,r0,r3
	bdnzt	2,1b
	blr
2:	li	r3,0
	blr

/*
 * Flush the dcache and invalidate the icache for a range of addresses.
 * The loop assumes 32-byte cache lines.
 *
 * flush_cache(addr, len)
 */
	.global	flush_cache
flush_cache:
	addi	4,4,0x1f	/* len = (len + 0x1f) / 0x20 */
	rlwinm.	4,4,27,5,31
	mtctr	4
	beqlr
1:	dcbf	0,3
	icbi	0,3
	addi	3,3,0x20
	bdnz	1b
	sync
	isync
	blr