/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkBlitRow_opts_DEFINED
#define SkBlitRow_opts_DEFINED

#include "include/private/SkColorData.h"
#include "include/private/SkVx.h"
#include "src/core/SkMSAN.h"

#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
    #include <immintrin.h>

    static inline __m256i SkPMSrcOver_AVX2(const __m256i& src, const __m256i& dst) {
        // Abstractly srcover is
        //     b = s + d*(1-srcA)
        //
        // In terms of unorm8 bytes, that works out to
        //     b = s + (d*(255-srcA) + 127) / 255
        //
        // But we approximate that to within a bit with
        //     b = s + (d*(255-srcA) + d) / 256
        // a.k.a
        //     b = s + (d*(256-srcA)) >> 8

        // The bottleneck of this math is the multiply, and we want to do it as
        // narrowly as possible, here getting inputs into 16-bit lanes and
        // using 16-bit multiplies.  We can do twice as many multiplies at once
        // as using naive 32-bit multiplies, and on top of that, the 16-bit multiplies
        // are themselves a couple cycles quicker.  Win-win.

        // We'll get everything in 16-bit lanes for two multiplies, one
        // handling dst red and blue, the other green and alpha.  (They're
        // conveniently 16-bits apart, you see.)  We don't need the individual
        // src channels beyond alpha until the very end when we do the "s + "
        // add, and we don't even need to unpack them; the adds cannot overflow.

        // Shuffle each pixel's srcA to the low byte of each 16-bit half of the pixel.
        const int _ = -1;   // fills a literal 0 byte.
        __m256i srcA_x2 = _mm256_shuffle_epi8(src,
                _mm256_setr_epi8(3,_,3,_, 7,_,7,_, 11,_,11,_, 15,_,15,_,
                                 3,_,3,_, 7,_,7,_, 11,_,11,_, 15,_,15,_));
        __m256i scale_x2 = _mm256_sub_epi16(_mm256_set1_epi16(256),
                                            srcA_x2);

        // Scale red and blue, leaving results in the low byte of each 16-bit lane.
        __m256i rb = _mm256_and_si256(_mm256_set1_epi32(0x00ff00ff), dst);
        rb = _mm256_mullo_epi16(rb, scale_x2);
        rb = _mm256_srli_epi16 (rb, 8);

        // Scale green and alpha, leaving results in the high byte, masking off the low bits.
        __m256i ga = _mm256_srli_epi16(dst, 8);
        ga = _mm256_mullo_epi16(ga, scale_x2);
        ga = _mm256_andnot_si256(_mm256_set1_epi32(0x00ff00ff), ga);

        return _mm256_add_epi32(src, _mm256_or_si256(rb, ga));
    }
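
    // Illustrative only (not from the upstream file): a one-pixel scalar sketch of the same
    // approximation, assuming 32-bit premultiplied pixels with alpha in the top byte (matching
    // the shuffle indices above).  Each channel computes s + ((d * (256 - srcA)) >> 8), with the
    // red/blue and green/alpha halves handled by two masked multiplies like the vector code.
    static inline uint32_t SkPMSrcOver_scalar_sketch(uint32_t s, uint32_t d) {
        uint32_t scale = 256 - (s >> 24);                                 // 256 - srcA
        uint32_t rb = (((d & 0x00ff00ff) * scale) >> 8) & 0x00ff00ff;     // red and blue
        uint32_t ga = (((d >> 8) & 0x00ff00ff) * scale) & 0xff00ff00;     // green and alpha
        return s + (rb | ga);
    }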

#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
    #include <immintrin.h>

    static inline __m128i SkPMSrcOver_SSE2(const __m128i& src, const __m128i& dst) {
        auto SkAlphaMulQ_SSE2 = [](const __m128i& c, const __m128i& scale) {
            const __m128i mask = _mm_set1_epi32(0xFF00FF);
            __m128i s = _mm_or_si128(_mm_slli_epi32(scale, 16), scale);

            // uint32_t rb = ((c & mask) * scale) >> 8
            __m128i rb = _mm_and_si128(mask, c);
            rb = _mm_mullo_epi16(rb, s);
            rb = _mm_srli_epi16(rb, 8);

            // uint32_t ag = ((c >> 8) & mask) * scale
            __m128i ag = _mm_srli_epi16(c, 8);
            ag = _mm_mullo_epi16(ag, s);
            // (rb & mask) | (ag & ~mask)
            ag = _mm_andnot_si128(mask, ag);

            return _mm_or_si128(rb, ag);
        };
        return _mm_add_epi32(src,
                             SkAlphaMulQ_SSE2(dst, _mm_sub_epi32(_mm_set1_epi32(256),
                                                                 _mm_srli_epi32(src, 24))));
    }
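
    // Illustrative only (not from the upstream file): a minimal usage sketch, assuming s4 and d4
    // each point at four premultiplied 32-bit pixels.  This is the pattern blit_row_s32a_opaque
    // below follows:
    //
    //     __m128i s = _mm_loadu_si128((const __m128i*)s4);
    //     __m128i d = _mm_loadu_si128((const __m128i*)d4);
    //     _mm_storeu_si128((__m128i*)d4, SkPMSrcOver_SSE2(s, d));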
#endif

namespace SK_OPTS_NS {

// Blend constant color over count src pixels, writing into dst.
inline void blit_row_color32(SkPMColor* dst, const SkPMColor* src, int count, SkPMColor color) {
    constexpr int N = 4;  // 8, 16 also reasonable choices
    using U32 = skvx::Vec<  N, uint32_t>;
    using U16 = skvx::Vec<4*N, uint16_t>;
    using U8  = skvx::Vec<4*N, uint8_t>;

    auto kernel = [color](U32 src) {
        unsigned invA = 255 - SkGetPackedA32(color);
        invA += invA >> 7;
        SkASSERT(0 < invA && invA < 256);  // We handle alpha == 0 or alpha == 255 specially.

        // (src * invA + (color << 8) + 128) >> 8
        // Should all fit in 16 bits.
        U8 s = skvx::bit_pun<U8>(src),
           a = U8(invA);
        U16 c = skvx::cast<uint16_t>(skvx::bit_pun<U8>(U32(color))),
            d = (mull(s,a) + (c << 8) + 128)>>8;
        return skvx::bit_pun<U32>(skvx::cast<uint8_t>(d));
    };

    while (count >= N) {
        kernel(U32::Load(src)).store(dst);
        src   += N;
        dst   += N;
        count -= N;
    }
    while (count --> 0) {
        *dst++ = kernel(U32{*src++})[0];
    }
}
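
// Illustrative only (not from the upstream file): a one-pixel scalar sketch of the kernel above.
// With a premultiplied color, each per-channel value of src*invA + (color << 8) + 128 stays
// within 16 bits, which is what the "(src * invA + (color << 8) + 128) >> 8" comment relies on.
inline SkPMColor blit_row_color32_scalar_sketch(SkPMColor src, SkPMColor color) {
    unsigned invA = 255 - SkGetPackedA32(color);
    invA += invA >> 7;   // map the 0..255 inverse alpha onto a 0..256 scale, as the kernel does
    SkPMColor result = 0;
    for (int shift = 0; shift < 32; shift += 8) {
        unsigned s = (src   >> shift) & 0xff;
        unsigned c = (color >> shift) & 0xff;
        result |= (((s * invA + (c << 8) + 128) >> 8) & 0xff) << shift;
    }
    return result;
}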

#if defined(SK_ARM_HAS_NEON)

// Return a uint8x8_t value, r, computed as r[i] = SkMulDiv255Round(x[i], y[i]), where r[i], x[i],
// y[i] are the i-th lanes of the corresponding NEON vectors.
static inline uint8x8_t SkMulDiv255Round_neon8(uint8x8_t x, uint8x8_t y) {
    uint16x8_t prod = vmull_u8(x, y);
    return vraddhn_u16(prod, vrshrq_n_u16(prod, 8));
}
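
// Illustrative only (not from the upstream file): the scalar rounding this mirrors.  With
// p = x*y + 128, (p + (p >> 8)) >> 8 is x*y/255 rounded to nearest; the rounding shift
// (vrshrq_n_u16) plus the rounding add-and-narrow (vraddhn_u16) above produce the same value
// in each lane.
inline uint8_t MulDiv255Round_scalar_sketch(uint8_t x, uint8_t y) {
    unsigned p = unsigned(x) * y + 128;
    return uint8_t((p + (p >> 8)) >> 8);
}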

// The implementations of SkPMSrcOver below perform alpha blending consistently with
// SkMulDiv255Round.  They compute the color components (numbers in the interval [0, 255]) as:
//
//   result_i = src_i + rint(g(src_alpha, dst_i))
//
// where g(x, y) = ((255.0 - x) * y) / 255.0 and rint rounds to the nearest integer.

// In this variant of SkPMSrcOver each NEON register, dst.val[i], src.val[i], contains the value
// of the same color component for 8 consecutive pixels.  The result of this function follows the
// same convention.
static inline uint8x8x4_t SkPMSrcOver_neon8(uint8x8x4_t dst, uint8x8x4_t src) {
    uint8x8_t nalphas = vmvn_u8(src.val[3]);
    uint8x8x4_t result;
    result.val[0] = vadd_u8(src.val[0], SkMulDiv255Round_neon8(nalphas, dst.val[0]));
    result.val[1] = vadd_u8(src.val[1], SkMulDiv255Round_neon8(nalphas, dst.val[1]));
    result.val[2] = vadd_u8(src.val[2], SkMulDiv255Round_neon8(nalphas, dst.val[2]));
    result.val[3] = vadd_u8(src.val[3], SkMulDiv255Round_neon8(nalphas, dst.val[3]));
    return result;
}

// In this variant of SkPMSrcOver dst and src contain the color components of two consecutive
// pixels.  The return value follows the same convention.
static inline uint8x8_t SkPMSrcOver_neon2(uint8x8_t dst, uint8x8_t src) {
    const uint8x8_t alpha_indices = vcreate_u8(0x0707070703030303);
    uint8x8_t nalphas = vmvn_u8(vtbl1_u8(src, alpha_indices));
    return vadd_u8(src, SkMulDiv255Round_neon8(nalphas, dst));
}
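
// Note (not from the upstream file), for clarity: vcreate_u8(0x0707070703030303) builds the byte
// vector {3,3,3,3, 7,7,7,7} (lane 0 is the least-significant byte), so vtbl1_u8 above broadcasts
// each pixel's alpha byte (lane 3 for the first pixel, lane 7 for the second) across that pixel's
// four lanes before the inversion and multiply.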

#endif

/*not static*/ inline
void blit_row_s32a_opaque(SkPMColor* dst, const SkPMColor* src, int len, U8CPU alpha) {
    SkASSERT(alpha == 0xFF);
    sk_msan_assert_initialized(src, src+len);

// Require AVX2 because of AVX2 integer calculation intrinsics in SrcOver
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
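    // Note (not from the upstream file), for clarity: in scalar terms the two vector tests below
    // check
    //   _mm256_testz_si256(ORed,  alphaMask) != 0  <=>  (ORed  & alphaMask) == 0, i.e. every
    //                                                   source alpha byte is 0x00 (transparent);
    //   _mm256_testc_si256(ANDed, alphaMask) != 0  <=>  (~ANDed & alphaMask) == 0, i.e. every
    //                                                   source alpha byte is 0xFF (opaque).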
    while (len >= 32) {
        // Load 32 source pixels.
        auto s0 = _mm256_loadu_si256((const __m256i*)(src) + 0),
             s1 = _mm256_loadu_si256((const __m256i*)(src) + 1),
             s2 = _mm256_loadu_si256((const __m256i*)(src) + 2),
             s3 = _mm256_loadu_si256((const __m256i*)(src) + 3);

        const auto alphaMask = _mm256_set1_epi32(0xFF000000);

        auto ORed = _mm256_or_si256(s3, _mm256_or_si256(s2, _mm256_or_si256(s1, s0)));
        if (_mm256_testz_si256(ORed, alphaMask)) {
            // All 32 source pixels are transparent.  Nothing to do.
            src += 32;
            dst += 32;
            len -= 32;
            continue;
        }

        auto d0 = (__m256i*)(dst) + 0,
             d1 = (__m256i*)(dst) + 1,
             d2 = (__m256i*)(dst) + 2,
             d3 = (__m256i*)(dst) + 3;

        auto ANDed = _mm256_and_si256(s3, _mm256_and_si256(s2, _mm256_and_si256(s1, s0)));
        if (_mm256_testc_si256(ANDed, alphaMask)) {
            // All 32 source pixels are opaque.  SrcOver becomes Src.
            _mm256_storeu_si256(d0, s0);
            _mm256_storeu_si256(d1, s1);
            _mm256_storeu_si256(d2, s2);
            _mm256_storeu_si256(d3, s3);
            src += 32;
            dst += 32;
            len -= 32;
            continue;
        }

        // TODO: This math is wrong.
        // Do SrcOver.
        _mm256_storeu_si256(d0, SkPMSrcOver_AVX2(s0, _mm256_loadu_si256(d0)));
        _mm256_storeu_si256(d1, SkPMSrcOver_AVX2(s1, _mm256_loadu_si256(d1)));
        _mm256_storeu_si256(d2, SkPMSrcOver_AVX2(s2, _mm256_loadu_si256(d2)));
        _mm256_storeu_si256(d3, SkPMSrcOver_AVX2(s3, _mm256_loadu_si256(d3)));
        src += 32;
        dst += 32;
        len -= 32;
    }

#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
    while (len >= 16) {
        // Load 16 source pixels.
        auto s0 = _mm_loadu_si128((const __m128i*)(src) + 0),
             s1 = _mm_loadu_si128((const __m128i*)(src) + 1),
             s2 = _mm_loadu_si128((const __m128i*)(src) + 2),
             s3 = _mm_loadu_si128((const __m128i*)(src) + 3);

        const auto alphaMask = _mm_set1_epi32(0xFF000000);

        auto ORed = _mm_or_si128(s3, _mm_or_si128(s2, _mm_or_si128(s1, s0)));
        if (_mm_testz_si128(ORed, alphaMask)) {
            // All 16 source pixels are transparent.  Nothing to do.
            src += 16;
            dst += 16;
            len -= 16;
            continue;
        }

        auto d0 = (__m128i*)(dst) + 0,
             d1 = (__m128i*)(dst) + 1,
             d2 = (__m128i*)(dst) + 2,
             d3 = (__m128i*)(dst) + 3;

        auto ANDed = _mm_and_si128(s3, _mm_and_si128(s2, _mm_and_si128(s1, s0)));
        if (_mm_testc_si128(ANDed, alphaMask)) {
            // All 16 source pixels are opaque.  SrcOver becomes Src.
            _mm_storeu_si128(d0, s0);
            _mm_storeu_si128(d1, s1);
            _mm_storeu_si128(d2, s2);
            _mm_storeu_si128(d3, s3);
            src += 16;
            dst += 16;
            len -= 16;
            continue;
        }

        // TODO: This math is wrong.
        // Do SrcOver.
        _mm_storeu_si128(d0, SkPMSrcOver_SSE2(s0, _mm_loadu_si128(d0)));
        _mm_storeu_si128(d1, SkPMSrcOver_SSE2(s1, _mm_loadu_si128(d1)));
        _mm_storeu_si128(d2, SkPMSrcOver_SSE2(s2, _mm_loadu_si128(d2)));
        _mm_storeu_si128(d3, SkPMSrcOver_SSE2(s3, _mm_loadu_si128(d3)));
        src += 16;
        dst += 16;
        len -= 16;
    }

#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
    while (len >= 16) {
        // Load 16 source pixels.
        auto s0 = _mm_loadu_si128((const __m128i*)(src) + 0),
             s1 = _mm_loadu_si128((const __m128i*)(src) + 1),
             s2 = _mm_loadu_si128((const __m128i*)(src) + 2),
             s3 = _mm_loadu_si128((const __m128i*)(src) + 3);

        const auto alphaMask = _mm_set1_epi32(0xFF000000);

        auto ORed = _mm_or_si128(s3, _mm_or_si128(s2, _mm_or_si128(s1, s0)));
        if (0xffff == _mm_movemask_epi8(_mm_cmpeq_epi8(_mm_and_si128(ORed, alphaMask),
                                                       _mm_setzero_si128()))) {
            // All 16 source pixels are transparent.  Nothing to do.
            src += 16;
            dst += 16;
            len -= 16;
            continue;
        }

        auto d0 = (__m128i*)(dst) + 0,
             d1 = (__m128i*)(dst) + 1,
             d2 = (__m128i*)(dst) + 2,
             d3 = (__m128i*)(dst) + 3;

        auto ANDed = _mm_and_si128(s3, _mm_and_si128(s2, _mm_and_si128(s1, s0)));
        if (0xffff == _mm_movemask_epi8(_mm_cmpeq_epi8(_mm_and_si128(ANDed, alphaMask),
                                                       alphaMask))) {
            // All 16 source pixels are opaque.  SrcOver becomes Src.
            _mm_storeu_si128(d0, s0);
            _mm_storeu_si128(d1, s1);
            _mm_storeu_si128(d2, s2);
            _mm_storeu_si128(d3, s3);
            src += 16;
            dst += 16;
            len -= 16;
            continue;
        }

        // TODO: This math is wrong.
        // Do SrcOver.
        _mm_storeu_si128(d0, SkPMSrcOver_SSE2(s0, _mm_loadu_si128(d0)));
        _mm_storeu_si128(d1, SkPMSrcOver_SSE2(s1, _mm_loadu_si128(d1)));
        _mm_storeu_si128(d2, SkPMSrcOver_SSE2(s2, _mm_loadu_si128(d2)));
        _mm_storeu_si128(d3, SkPMSrcOver_SSE2(s3, _mm_loadu_si128(d3)));
        src += 16;
        dst += 16;
        len -= 16;
    }

#elif defined(SK_ARM_HAS_NEON)
    // Do 8 pixels at a time.  A 16-pixels-at-a-time version of this code was also tested, but it
    // underperformed on some of the platforms under test for inputs with frequent transitions of
    // alpha (corresponding to changes of the conditions [~]alphas_u64 == 0 below).  It may be
    // worth revisiting the situation in the future.
    while (len >= 8) {
        // Load 8 pixels in 4 NEON registers.  src_col.val[i] will contain the same color
        // component for 8 consecutive pixels (e.g. src_col.val[3] will contain all alpha
        // components of 8 pixels).
        uint8x8x4_t src_col = vld4_u8(reinterpret_cast<const uint8_t*>(src));
        src += 8;
        len -= 8;

        // We now detect 2 special cases: the first occurs when all alphas are zero (the 8 pixels
        // are all transparent), the second when all alphas are fully set (they are all opaque).
        uint8x8_t alphas = src_col.val[3];
        uint64_t alphas_u64 = vget_lane_u64(vreinterpret_u64_u8(alphas), 0);
        if (alphas_u64 == 0) {
            // All pixels transparent.
            dst += 8;
            continue;
        }

        if (~alphas_u64 == 0) {
            // All pixels opaque.
            vst4_u8(reinterpret_cast<uint8_t*>(dst), src_col);
            dst += 8;
            continue;
        }

        uint8x8x4_t dst_col = vld4_u8(reinterpret_cast<uint8_t*>(dst));
        vst4_u8(reinterpret_cast<uint8_t*>(dst), SkPMSrcOver_neon8(dst_col, src_col));
        dst += 8;
    }

    // Deal with leftover pixels.
    for (; len >= 2; len -= 2, src += 2, dst += 2) {
        uint8x8_t src2 = vld1_u8(reinterpret_cast<const uint8_t*>(src));
        uint8x8_t dst2 = vld1_u8(reinterpret_cast<const uint8_t*>(dst));
        vst1_u8(reinterpret_cast<uint8_t*>(dst), SkPMSrcOver_neon2(dst2, src2));
    }

    if (len != 0) {
        uint8x8_t result = SkPMSrcOver_neon2(vcreate_u8(*dst), vcreate_u8(*src));
        vst1_lane_u32(dst, vreinterpret_u32_u8(result), 0);
    }
    return;
#endif

    while (len-- > 0) {
        // This 0xFF000000 is not semantically necessary, but for compatibility
        // with chromium:611002 we need to keep it until we figure out where
        // the non-premultiplied src values (like 0x00FFFFFF) are coming from.
        // TODO(mtklein): sort this out and assert *src is premul here.
        if (*src & 0xFF000000) {
            *dst = (*src >= 0xFF000000) ? *src : SkPMSrcOver(*src, *dst);
        }
        src++;
        dst++;
    }
}
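
// Illustrative only (not from the upstream file): a minimal usage sketch of the two blitters
// above, assuming dst and src are rows of n premultiplied 32-bit pixels and paintColor is a
// premultiplied color with 0 < alpha < 255 (the fully transparent and fully opaque cases are
// expected to be handled by the caller, per the SkASSERT in blit_row_color32):
//
//     SK_OPTS_NS::blit_row_color32(dst, src, n, paintColor);     // blend paintColor over src
//     SK_OPTS_NS::blit_row_s32a_opaque(dst, src, n, 0xFF);       // srcover src onto dst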

}  // namespace SK_OPTS_NS

#endif  // SkBlitRow_opts_DEFINED