/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Regents of the University of California
 */

#include <linux/linkage.h>
#include <asm/asm.h>

/* void *memcpy(void *, const void *, size_t) */
ENTRY(__memcpy)
WEAK(memcpy)
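        /*
         * memcpy is emitted as a weak symbol so that other
         * implementations (e.g. instrumented ones such as KASAN's)
         * can override it; __memcpy stays the strong fallback.
         */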
        move t6, a0  /* Preserve return value */

        /* Defer to byte-oriented copy for small sizes */
        sltiu a3, a2, 128
        bnez a3, 4f
        /* Use word-oriented copy only if low-order bits match */
        andi a3, t6, SZREG-1
        andi a4, a1, SZREG-1
        bne a3, a4, 4f
        beqz a3, 2f  /* Skip if already aligned */
        /*
         * Round to nearest double word-aligned address
         * greater than or equal to start address
         */
        andi a3, a1, ~(SZREG-1)
        addi a3, a3, SZREG
        /* Handle initial misalignment */
        sub a4, a3, a1
1:
        lb a5, 0(a1)
        addi a1, a1, 1
        sb a5, 0(t6)
        addi t6, t6, 1
        bltu a1, a3, 1b
        sub a2, a2, a4  /* Update count */
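
        /*
         * Bulk copy: a4 = byte count rounded down to a whole number
         * of 16*SZREG-byte blocks, a3 = first source address past
         * the block-copied region.
         */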
2:
        andi a4, a2, ~((16*SZREG)-1)
        beqz a4, 4f
        add a3, a1, a4
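        /*
         * Unrolled loop: each pass copies 16 machine words through
         * the ten scratch registers a4-a7/t0-t5, as a batch of ten
         * loads/stores followed by a batch of six.
         */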
3:
        REG_L a4,        0(a1)
        REG_L a5,    SZREG(a1)
        REG_L a6,  2*SZREG(a1)
        REG_L a7,  3*SZREG(a1)
        REG_L t0,  4*SZREG(a1)
        REG_L t1,  5*SZREG(a1)
        REG_L t2,  6*SZREG(a1)
        REG_L t3,  7*SZREG(a1)
        REG_L t4,  8*SZREG(a1)
        REG_L t5,  9*SZREG(a1)
        REG_S a4,        0(t6)
        REG_S a5,    SZREG(t6)
        REG_S a6,  2*SZREG(t6)
        REG_S a7,  3*SZREG(t6)
        REG_S t0,  4*SZREG(t6)
        REG_S t1,  5*SZREG(t6)
        REG_S t2,  6*SZREG(t6)
        REG_S t3,  7*SZREG(t6)
        REG_S t4,  8*SZREG(t6)
        REG_S t5,  9*SZREG(t6)
        REG_L a4, 10*SZREG(a1)
        REG_L a5, 11*SZREG(a1)
        REG_L a6, 12*SZREG(a1)
        REG_L a7, 13*SZREG(a1)
        REG_L t0, 14*SZREG(a1)
        REG_L t1, 15*SZREG(a1)
        addi a1, a1, 16*SZREG
        REG_S a4, 10*SZREG(t6)
        REG_S a5, 11*SZREG(t6)
        REG_S a6, 12*SZREG(t6)
        REG_S a7, 13*SZREG(t6)
        REG_S t0, 14*SZREG(t6)
        REG_S t1, 15*SZREG(t6)
        addi t6, t6, 16*SZREG
        bltu a1, a3, 3b
        andi a2, a2, (16*SZREG)-1  /* Update count */
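
        /*
         * Tail path: reached directly for small or misaligned
         * copies, and after the bulk loop for whatever remains.
         */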
4:
        /* Handle trailing misalignment */
        beqz a2, 6f
        add a3, a1, a2

        /* Use word-oriented copy if co-aligned to word boundary */
        or a5, a1, t6
        or a5, a5, a3
        andi a5, a5, 3
        bnez a5, 5f
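        /*
         * Source, destination, and end address are all 4-byte
         * aligned; folding the end address into the check also
         * guarantees the remaining count is a multiple of 4.
         */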
7:
        lw a4, 0(a1)
        addi a1, a1, 4
        sw a4, 0(t6)
        addi t6, t6, 4
        bltu a1, a3, 7b

        ret
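
        /* Not mutually word-aligned: copy the tail a byte at a time */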
5:
        lb a4, 0(a1)
        addi a1, a1, 1
        sb a4, 0(t6)
        addi t6, t6, 1
        bltu a1, a3, 5b
6:
        ret
END(__memcpy)