/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkChecksum_opts_DEFINED
#define SkChecksum_opts_DEFINED

#include "include/core/SkTypes.h"
#include "include/private/SkChecksum.h"
#include "src/core/SkUtils.h"   // sk_unaligned_load

#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE42
    #include <immintrin.h>
#elif defined(SK_ARM_HAS_CRC32)
    #include <arm_acle.h>
#endif
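// Overview (added note): hash_fn() lives inside SK_OPTS_NS so it can be compiled
// separately for each instruction-set target.  The #if chain below picks a
// CRC32-instruction-based hash on 64-bit x86 with SSE 4.2, on 32-bit x86 with
// SSE 4.2, or on ARM with the CRC32 extension, and falls back to a portable
// Murmur3 when none of those are available.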
namespace SK_OPTS_NS {

#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE42 && (defined(__x86_64__) || defined(_M_X64))
    // This is not a CRC32.  It's Just A Hash that uses those instructions because they're fast.
    /*not static*/ inline uint32_t hash_fn(const void* vdata, size_t bytes, uint32_t seed) {
        auto data = (const uint8_t*)vdata;

        // _mm_crc32_u64() operates on 64-bit registers, so we use uint64_t for a while.
        uint64_t hash = seed;
        if (bytes >= 24) {
            // We'll create 3 independent hashes, each using _mm_crc32_u64()
            // to hash 8 bytes per step.  Both 3 and independent are important:
            // we can execute 3 of these instructions in parallel on a single core.
            uint64_t a = hash,
                     b = hash,
                     c = hash;
            size_t steps = bytes/24;
            while (steps --> 0) {
                a = _mm_crc32_u64(a, sk_unaligned_load<uint64_t>(data+ 0));
                b = _mm_crc32_u64(b, sk_unaligned_load<uint64_t>(data+ 8));
                c = _mm_crc32_u64(c, sk_unaligned_load<uint64_t>(data+16));
                data += 24;
            }
            bytes %= 24;
            hash = _mm_crc32_u32(a, _mm_crc32_u32(b, c));
        }

        SkASSERT(bytes < 24);
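        // At most 23 bytes remain.  Peel off one 8-byte step here when 16 or more are
        // left, so the bit tests below only need to cover the final 0-15 bytes.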
        if (bytes >= 16) {
            hash = _mm_crc32_u64(hash, sk_unaligned_load<uint64_t>(data));
            bytes -= 8;
            data  += 8;
        }

        SkASSERT(bytes < 16);
        if (bytes & 8) {
            hash = _mm_crc32_u64(hash, sk_unaligned_load<uint64_t>(data));
            data += 8;
        }

        // The remainder of these _mm_crc32_u*() operate on a 32-bit register.
        // We don't lose anything here: only the bottom 32-bits were populated.
        auto hash32 = (uint32_t)hash;

        if (bytes & 4) {
            hash32 = _mm_crc32_u32(hash32, sk_unaligned_load<uint32_t>(data));
            data += 4;
        }
        if (bytes & 2) {
            hash32 = _mm_crc32_u16(hash32, sk_unaligned_load<uint16_t>(data));
            data += 2;
        }
        if (bytes & 1) {
            hash32 = _mm_crc32_u8(hash32, sk_unaligned_load<uint8_t>(data));
        }
        return hash32;
    }
#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE42
    // 32-bit version of above, using _mm_crc32_u32() but not _mm_crc32_u64().
    /*not static*/ inline uint32_t hash_fn(const void* vdata, size_t bytes, uint32_t hash) {
        auto data = (const uint8_t*)vdata;

        if (bytes >= 12) {
            // We'll create 3 independent hashes, each using _mm_crc32_u32()
            // to hash 4 bytes per step.  Both 3 and independent are important:
            // we can execute 3 of these instructions in parallel on a single core.
            uint32_t a = hash,
                     b = hash,
                     c = hash;
            size_t steps = bytes/12;
            while (steps --> 0) {
                a = _mm_crc32_u32(a, sk_unaligned_load<uint32_t>(data+0));
                b = _mm_crc32_u32(b, sk_unaligned_load<uint32_t>(data+4));
                c = _mm_crc32_u32(c, sk_unaligned_load<uint32_t>(data+8));
                data += 12;
            }
            bytes %= 12;
            hash = _mm_crc32_u32(a, _mm_crc32_u32(b, c));
        }

        SkASSERT(bytes < 12);
        if (bytes >= 8) {
            hash = _mm_crc32_u32(hash, sk_unaligned_load<uint32_t>(data));
            bytes -= 4;
            data  += 4;
        }

        SkASSERT(bytes < 8);
        if (bytes & 4) {
            hash = _mm_crc32_u32(hash, sk_unaligned_load<uint32_t>(data));
            data += 4;
        }
        if (bytes & 2) {
            hash = _mm_crc32_u16(hash, sk_unaligned_load<uint16_t>(data));
            data += 2;
        }
        if (bytes & 1) {
            hash = _mm_crc32_u8(hash, sk_unaligned_load<uint8_t>(data));
        }
        return hash;
    }
#elif defined(SK_ARM_HAS_CRC32)
    /*not static*/ inline uint32_t hash_fn(const void* vdata, size_t bytes, uint32_t hash) {
        auto data = (const uint8_t*)vdata;

        if (bytes >= 24) {
            uint32_t a = hash,
                     b = hash,
                     c = hash;
            size_t steps = bytes/24;
            while (steps --> 0) {
                a = __crc32d(a, sk_unaligned_load<uint64_t>(data+ 0));
                b = __crc32d(b, sk_unaligned_load<uint64_t>(data+ 8));
                c = __crc32d(c, sk_unaligned_load<uint64_t>(data+16));
                data += 24;
            }
            bytes %= 24;
            hash = __crc32w(a, __crc32w(b, c));
        }

        SkASSERT(bytes < 24);
        if (bytes >= 16) {
            hash = __crc32d(hash, sk_unaligned_load<uint64_t>(data));
            bytes -= 8;
            data  += 8;
        }

        SkASSERT(bytes < 16);
        if (bytes & 8) {
            hash = __crc32d(hash, sk_unaligned_load<uint64_t>(data));
            data += 8;
        }
        if (bytes & 4) {
            hash = __crc32w(hash, sk_unaligned_load<uint32_t>(data));
            data += 4;
        }
        if (bytes & 2) {
            hash = __crc32h(hash, sk_unaligned_load<uint16_t>(data));
            data += 2;
        }
        if (bytes & 1) {
            hash = __crc32b(hash, sk_unaligned_load<uint8_t>(data));
        }
        return hash;
    }
#else
    // This is Murmur3.
    /*not static*/ inline uint32_t hash_fn(const void* vdata, size_t bytes, uint32_t hash) {
        auto data = (const uint8_t*)vdata;

        size_t original_bytes = bytes;

        // Handle 4 bytes at a time while possible.
        while (bytes >= 4) {
            uint32_t k = sk_unaligned_load<uint32_t>(data);
            k *= 0xcc9e2d51;
            k = (k << 15) | (k >> 17);
            k *= 0x1b873593;

            hash ^= k;
            hash = (hash << 13) | (hash >> 19);
            hash *= 5;
            hash += 0xe6546b64;

            bytes -= 4;
            data  += 4;
        }

        // Handle last 0-3 bytes.  The case labels intentionally fall through,
        // accumulating the remaining bytes into k before the final mix.
        uint32_t k = 0;
        switch (bytes & 3) {
            case 3: k ^= data[2] << 16;  // fall through
            case 2: k ^= data[1] <<  8;  // fall through
            case 1: k ^= data[0] <<  0;
                    k *= 0xcc9e2d51;
                    k = (k << 15) | (k >> 17);
                    k *= 0x1b873593;
                    hash ^= k;
        }

        hash ^= original_bytes;
        return SkChecksum::Mix(hash);
    }
#endif

}  // namespace SK_OPTS_NS
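// Usage sketch (added note; buf/len are placeholder caller variables, not part of this
// header).  Skia normally reaches this function through its SkOpts runtime dispatch
// rather than calling it directly:
//
//     uint32_t h = SK_OPTS_NS::hash_fn(buf, len, /*seed=*/0);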
#endif  // SkChecksum_opts_DEFINED