/*
 * (C) 2018 Kai-Uwe Bloem <derkub@gmail.com>
 *
 * 32bit ARM/MIPS optimized C implementation of memcpy and memmove, designed
 * for good performance with gcc.
 * - if src and dest have the same alignment, a 4-word (16 byte) block copy
 *   is used.
 * - if src and dest are unaligned to each other, the copy still loads word
 *   data and stores correctly shifted word data (for all but the first and
 *   last bytes, to avoid under/overstepping the src region).
 *
 * ATTN: does dirty aliasing tricks with undefined behaviour by the standard
 * (however, this improves the generated code).
 * ATTN: uses struct assignment, which only works if the compiler inlines
 * it (else it would probably call memcpy :-)).
 */
#include <stdlib.h>
#include <stdint.h>
#include <endian.h>

#if __BYTE_ORDER == __LITTLE_ENDIAN
#define _L_	>>
#define _U_	<<
#else
#define _L_	<<
#define _U_	>>
#endif
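
/*
 * _L_ shifts loaded data towards lower memory addresses, _U_ towards higher
 * ones: on little-endian the byte at the lowest address sits in the least
 * significant bits, so "lower" means a right shift; on big-endian it is the
 * reverse.  E.g. on little-endian with a 1-byte misalignment the copy loop
 * below stores (v1 >> 8) | (v2 << 24).
 */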

void *memcpy(void *dest, const void *src, size_t n)
{
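	/*
	 * ss/ds are cursors over src/dest; the union lets the same address be
	 * advanced in byte, word, double-word or 16-byte steps (the "dirty
	 * aliasing tricks" mentioned above), and struct _16 is what enables
	 * the 16-byte struct-assignment copies.
	 */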
	struct _16 { uint32_t a[4]; };
	union { const void *v; uint8_t *c; uint32_t *i; uint64_t *l; struct _16 *s; }
		ss = { src }, ds = { dest };
	const int lm = sizeof(uint32_t)-1;

	/* align src to word */
	while (((uintptr_t)ss.c & lm) && n > 0)
		*ds.c++ = *ss.c++, n--;
	if (((uintptr_t)ds.c & lm) == 0) {
		/* fast copy if pointers have the same alignment */
		while (n >= sizeof(struct _16))	/* copy 16 byte blocks */
			*ds.s++ = *ss.s++, n -= sizeof(struct _16);
		if (n >= sizeof(uint64_t))	/* copy leftover 8 byte block */
			*ds.l++ = *ss.l++, n -= sizeof(uint64_t);
//		if (n >= sizeof(uint32_t))	/* copy leftover 4 byte block */
//			*ds.i++ = *ss.i++, n -= sizeof(uint32_t);
	} else if (n >= 2*sizeof(uint32_t)) {
		/* unaligned data big enough to avoid overstepping src */
		uint32_t v1, v2, b, s;

		/* align dest to word */
		while (((uintptr_t)ds.c & lm) && n > 0)
			*ds.c++ = *ss.c++, n--;
		/* copy loop: load aligned words and store shifted words */
		b = (uintptr_t)ss.c & lm, s = b*8; ss.c -= b;
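		/* v1/v2 hold the two aligned source words straddling the next
		 * word to be stored to dest; their roles alternate inside the
		 * loop */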
		v1 = *ss.i++, v2 = *ss.i++;
		while (n >= 3*sizeof(uint32_t)) {
			*ds.i++ = (v1 _L_ s) | (v2 _U_ (32-s)); v1 = *ss.i++;
			*ds.i++ = (v2 _L_ s) | (v1 _U_ (32-s)); v2 = *ss.i++;
			n -= 2*sizeof(uint32_t);
		}
		/* data for one more store is already loaded */
		if (n >= sizeof(uint32_t)) {
			*ds.i++ = (v1 _L_ s) | (v2 _U_ (32-s));
			n -= sizeof(uint32_t);
			ss.c += sizeof(uint32_t);
		}
		ss.c += b - 2*sizeof(uint32_t);
	}
	/* copy 0-7 leftover bytes */
	while (n >= 4) {
		*ds.c++ = *ss.c++, n--; *ds.c++ = *ss.c++, n--;
		*ds.c++ = *ss.c++, n--; *ds.c++ = *ss.c++, n--;
	}
	while (n > 0)
		*ds.c++ = *ss.c++, n--;
	return dest;
}

void *memmove(void *dest, const void *src, size_t n)
{
	struct _16 { uint32_t a[4]; };
	union { const void *v; uint8_t *c; uint32_t *i; uint64_t *l; struct _16 *s; }
		ss = { src+n }, ds = { dest+n };
	size_t pd = dest > src ? dest - src : src - dest;
	const int lm = sizeof(uint32_t)-1;
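
	/* no overlap at all, or dest below src: a plain forward copy is safe */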
	if (dest <= src || dest >= src+n)
		return memcpy(dest, src, n);

	/* align src to word */
	while (((uintptr_t)ss.c & lm) && n > 0)
		*--ds.c = *--ss.c, n--;
	/* take care not to copy multi-byte data if it overlaps */
	if (((uintptr_t)ds.c & lm) == 0) {
		/* fast copy if pointers have the same alignment */
		while (n >= sizeof(struct _16) && pd >= sizeof(struct _16))
			/* copy 16 byte blocks if no overlap */
			*--ds.s = *--ss.s, n -= sizeof(struct _16);
		while (n >= sizeof(uint64_t) && pd >= sizeof(uint64_t))
			/* copy leftover 8 byte blocks if no overlap */
			*--ds.l = *--ss.l, n -= sizeof(uint64_t);
		while (n >= sizeof(uint32_t) && pd >= sizeof(uint32_t))
			/* copy leftover 4 byte blocks if no overlap */
			*--ds.i = *--ss.i, n -= sizeof(uint32_t);
	} else if (n >= 2*sizeof(uint32_t) && pd >= 2*sizeof(uint32_t)) {
		/* unaligned data big enough to avoid understepping src */
		uint32_t v1, v2, b, s;

		/* align dest to word */
		while (((uintptr_t)ds.c & lm) && n > 0)
			*--ds.c = *--ss.c, n--;
		/* copy loop: load aligned words and store shifted words */
		b = -(uintptr_t)ss.c & lm, s = b*8; ss.c += b;	/* round src up to word */
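		/* prime v1/v2 with the two aligned source words covering the
		 * last four not-yet-copied bytes; their roles alternate inside
		 * the loop */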
		v1 = *--ss.i, v2 = *--ss.i;
		while (n >= 3*sizeof(uint32_t)) {
			*--ds.i = (v1 _U_ s) | (v2 _L_ (32-s)); v1 = *--ss.i;
			*--ds.i = (v2 _U_ s) | (v1 _L_ (32-s)); v2 = *--ss.i;
			n -= 2*sizeof(uint32_t);
		}
		/* data for one more store is already loaded */
		if (n >= sizeof(uint32_t)) {
			*--ds.i = (v1 _U_ s) | (v2 _L_ (32-s));
			n -= sizeof(uint32_t);
			ss.c -= sizeof(uint32_t);
		}
		ss.c -= b - 2*sizeof(uint32_t);
	}
	/* copy 0-7 leftover bytes (or up to everything if the pointers are too close) */
	while (n >= 4) {
		*--ds.c = *--ss.c, n--; *--ds.c = *--ss.c, n--;
		*--ds.c = *--ss.c, n--; *--ds.c = *--ss.c, n--;
	}
	while (n > 0)
		*--ds.c = *--ss.c, n--;
	return dest;
}
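
#ifdef MEMCPY_SELFTEST
/*
 * Minimal self-test sketch, not part of the original file; the macro name
 * MEMCPY_SELFTEST is an arbitrary choice for this example.  Build with e.g.
 * "gcc -O2 -fno-builtin -DMEMCPY_SELFTEST memcpy.c" (ideally for the 32-bit
 * target this file is written for) so the calls below reach the functions
 * above, then run: it compares memcpy and memmove against a byte-by-byte
 * reference for a range of alignments, sizes and overlap distances.
 */
#include <stdio.h>

static void ref_move(uint8_t *d, const uint8_t *s, size_t n)
{
	if (d <= s)
		while (n--) *d++ = *s++;	/* forward reference copy */
	else
		while (n--) d[n] = s[n];	/* backward reference copy */
}

int main(void)
{
	static uint8_t src[96], dst[96], ref[96], buf[96], chk[96];
	size_t so, dofs, n, i;
	int fail = 0;

	for (so = 0; so < 4; so++)
	for (dofs = 0; dofs < 4; dofs++)
	for (n = 0; n <= 64; n++) {
		/* fresh, distinct fill patterns for every combination */
		for (i = 0; i < 96; i++) {
			src[i] = (uint8_t)(i*7 + 1);
			dst[i] = ref[i] = (uint8_t)(0xA5 ^ i);
			buf[i] = chk[i] = (uint8_t)(i*13 + 5);
		}
		/* non-overlapping copy, all src/dest misalignments */
		memcpy(dst + dofs, src + so, n);
		ref_move(ref + dofs, src + so, n);
		for (i = 0; i < 96; i++) fail |= (dst[i] != ref[i]);
		/* overlapping moves in both directions */
		memmove(buf + dofs, buf + so + 16, n);
		ref_move(chk + dofs, chk + so + 16, n);
		memmove(buf + so + 16, buf + dofs, n);
		ref_move(chk + so + 16, chk + dofs, n);
		for (i = 0; i < 96; i++) fail |= (buf[i] != chk[i]);
	}
	printf(fail ? "memcpy/memmove self-test: FAIL\n"
		    : "memcpy/memmove self-test: OK\n");
	return fail;
}
#endif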