/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkBitmapProcState_opts_DEFINED
#define SkBitmapProcState_opts_DEFINED

#include "src/core/SkBitmapProcState.h"
// SkBitmapProcState optimized Shader, Sample, or Matrix procs.
//
// Only S32_alpha_D32_filter_DX exploits instructions beyond
// our common baseline SSE2/NEON instruction sets, so that's
// all that lives here.
//
// The rest are scattershot at the moment but I want to get them
// all migrated to be normal code inside SkBitmapProcState.cpp.
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
    #include <immintrin.h>
#elif defined(SK_ARM_HAS_NEON)
    #include <arm_neon.h>
#endif

namespace SK_OPTS_NS {
// This same basic packing scheme is used throughout the file.
static void decode_packed_coordinates_and_weight(uint32_t packed, int* v0, int* v1, int* w) {
    // The top 14 bits are the integer coordinate x0 or y0.
    *v0 = packed >> 18;

    // The bottom 14 bits are the integer coordinate x1 or y1.
    *v1 = packed & 0x3fff;

    // The middle 4 bits are the interpolating factor between the two, i.e. the weight for v1.
    *w = (packed >> 14) & 0xf;
}
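
// A quick worked example of the packing (the values here are made up, purely to
// illustrate the layout): packed = (5 << 18) | (9 << 14) | 12 decodes to
// v0 = 5, w = 9, v1 = 12, i.e. blend the pixels at coordinates 5 and 12
// with weights (16-9) and 9 respectively.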
#if 1 && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3

    // As above, 4x.
    static void decode_packed_coordinates_and_weight(__m128i packed,
                                                     int v0[4], int v1[4], __m128i* w) {
        _mm_storeu_si128((__m128i*)v0, _mm_srli_epi32(packed, 18));
        _mm_storeu_si128((__m128i*)v1, _mm_and_si128 (packed, _mm_set1_epi32(0x3fff)));
        *w = _mm_and_si128(_mm_srli_epi32(packed, 14), _mm_set1_epi32(0xf));
    }
    // This is the crux of the SSSE3 implementation,
    // interpolating in X for up to two output pixels (A and B) using _mm_maddubs_epi16().
    static inline __m128i interpolate_in_x(uint32_t A0, uint32_t A1,
                                           uint32_t B0, uint32_t B1,
                                           const __m128i& interlaced_x_weights) {
        // _mm_maddubs_epi16() is a little idiosyncratic, but very helpful as the core of a lerp.
        //
        // It takes two arguments interlaced byte-wise:
        //    - first  arg: [ x,y, ... 7 more pairs of 8-bit values ... ]
        //    - second arg: [ z,w, ... 7 more pairs of 8-bit values ... ]
        // and returns 8 16-bit values: [ x*z + y*w, ... 7 more 16-bit values ... ].
        //
        // That's why we go to all this trouble to make interlaced_x_weights,
        // and here we're interlacing A0 with A1, B0 with B1 to match.

        __m128i interlaced_A = _mm_unpacklo_epi8(_mm_cvtsi32_si128(A0), _mm_cvtsi32_si128(A1)),
                interlaced_B = _mm_unpacklo_epi8(_mm_cvtsi32_si128(B0), _mm_cvtsi32_si128(B1));

        return _mm_maddubs_epi16(_mm_unpacklo_epi64(interlaced_A, interlaced_B),
                                 interlaced_x_weights);
    }
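
    // Per channel, that multiply-add works out to a lerp scaled up by 16.
    // With hypothetical numbers just to illustrate: pixel bytes p0 = 100, p1 = 200
    // and weight wx = 4 give interlaced weights (16-4, 4), so the 16-bit lane holds
    // 100*12 + 200*4 = 2000, which is 16 times the lerped value 125.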
    // Interpolate {A0..A3} --> output pixel A, and {B0..B3} --> output pixel B.
    // Returns two pixels, with each channel in a 16-bit lane of the __m128i.
    static inline __m128i interpolate_in_x_and_y(uint32_t A0, uint32_t A1,
                                                 uint32_t A2, uint32_t A3,
                                                 uint32_t B0, uint32_t B1,
                                                 uint32_t B2, uint32_t B3,
                                                 const __m128i& interlaced_x_weights,
                                                 int wy) {
        // The stored Y weight wy is for y1, and y0 gets a weight 16-wy.
        const __m128i wy1 = _mm_set1_epi16(wy),
                      wy0 = _mm_sub_epi16(_mm_set1_epi16(16), wy1);

        // First interpolate in X,
        // leaving the values in 16-bit lanes scaled up by those [0,16] interlaced_x_weights.
        __m128i row0 = interpolate_in_x(A0,A1, B0,B1, interlaced_x_weights),
                row1 = interpolate_in_x(A2,A3, B2,B3, interlaced_x_weights);

        // Interpolate in Y across the two rows,
        // then scale everything down by the maximum total weight 16x16 = 256.
        return _mm_srli_epi16(_mm_add_epi16(_mm_mullo_epi16(row0, wy0),
                                            _mm_mullo_epi16(row1, wy1)), 8);
    }
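
    // Written out per channel, that's the usual bilerp:
    //    A = ( (A0*(16-wx) + A1*wx) * (16-wy)
    //        + (A2*(16-wx) + A3*wx) *     wy ) >> 8,
    // with the total weight 16*16 = 256 divided back out by the final shift.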
    /*not static*/ inline
    void S32_alpha_D32_filter_DX(const SkBitmapProcState& s,
                                 const uint32_t* xy, int count, uint32_t* colors) {
        SkASSERT(count > 0 && colors != nullptr);
        SkASSERT(s.fFilterQuality != kNone_SkFilterQuality);
        SkASSERT(kN32_SkColorType == s.fPixmap.colorType());

        int alpha = s.fAlphaScale;

        // Return (px * s.fAlphaScale) / 256.   (s.fAlphaScale is in [0,256].)
        auto scale_by_alpha = [alpha](const __m128i& px) {
            return alpha == 256 ? px
                                : _mm_srli_epi16(_mm_mullo_epi16(px, _mm_set1_epi16(alpha)), 8);
        };

        // We're in _DX_ mode here, so we're only varying in X.
        // That means the first entry of xy is our constant pair of Y coordinates and weight in Y.
        // All the other entries in xy will be pairs of X coordinates and the X weight.
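        // For example (a hypothetical xy[] just to illustrate the layout), with
        // count == 2 the array holds three packed words:
        //    xy[0] = pack(y0,wy,y1), xy[1] = pack(x0,wx,x1), xy[2] = pack(x0',wx',x1').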
        int y0, y1, wy;
        decode_packed_coordinates_and_weight(*xy++, &y0, &y1, &wy);

        auto row0 = (const uint32_t*)((const uint8_t*)s.fPixmap.addr() + y0 * s.fPixmap.rowBytes()),
             row1 = (const uint32_t*)((const uint8_t*)s.fPixmap.addr() + y1 * s.fPixmap.rowBytes());

        while (count >= 4) {
            // We can really get going, loading 4 X pairs at a time to produce 4 output pixels.
            const __m128i xx = _mm_loadu_si128((const __m128i*)xy);

            int x0[4],
                x1[4];
            __m128i wx;
            decode_packed_coordinates_and_weight(xx, x0, x1, &wx);

            // Splat out each x weight wx four times (one for each pixel channel) as wx1,
            // and sixteen minus that as the weight for x0, wx0.
            __m128i wx1 = _mm_shuffle_epi8(wx, _mm_setr_epi8(0,0,0,0,4,4,4,4,8,8,8,8,12,12,12,12)),
                    wx0 = _mm_sub_epi8(_mm_set1_epi8(16), wx1);

            // We need to interlace wx0 and wx1 for _mm_maddubs_epi16().
            __m128i interlaced_x_weights_AB = _mm_unpacklo_epi8(wx0,wx1),
                    interlaced_x_weights_CD = _mm_unpackhi_epi8(wx0,wx1);
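
            // Byte-wise, interlaced_x_weights_AB now reads
            //    [ 16-wxA, wxA, 16-wxA, wxA, ... (x4 for A) ..., 16-wxB, wxB, ... ],
            // pairing each channel's x0 weight with its x1 weight for the madd.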
            // interpolate_in_x_and_y() can produce two output pixels (A and B) at a time
            // from eight input pixels {A0..A3} and {B0..B3}, arranged in a 2x2 grid for each.
            __m128i AB = interpolate_in_x_and_y(row0[x0[0]], row0[x1[0]],
                                                row1[x0[0]], row1[x1[0]],
                                                row0[x0[1]], row0[x1[1]],
                                                row1[x0[1]], row1[x1[1]],
                                                interlaced_x_weights_AB, wy);

            // Once more with the other half of the x-weights for two more pixels C,D.
            __m128i CD = interpolate_in_x_and_y(row0[x0[2]], row0[x1[2]],
                                                row1[x0[2]], row1[x1[2]],
                                                row0[x0[3]], row0[x1[3]],
                                                row1[x0[3]], row1[x1[3]],
                                                interlaced_x_weights_CD, wy);

            // Scale by alpha, pack back together to 8-bit lanes, and write out four pixels!
            _mm_storeu_si128((__m128i*)colors, _mm_packus_epi16(scale_by_alpha(AB),
                                                                scale_by_alpha(CD)));
            xy     += 4;
            colors += 4;
            count  -= 4;
        }

        while (count --> 0) {
            // This is exactly the same flow as the count >= 4 loop above, but writing one pixel.
            int x0, x1, wx;
            decode_packed_coordinates_and_weight(*xy++, &x0, &x1, &wx);

            // As above, splat out wx four times as wx1, and sixteen minus that as wx0.
            __m128i wx1 = _mm_set1_epi8(wx),   // This splats it out 16 times, but that's fine.
                    wx0 = _mm_sub_epi8(_mm_set1_epi8(16), wx1);

            __m128i interlaced_x_weights_A = _mm_unpacklo_epi8(wx0, wx1);

            __m128i A = interpolate_in_x_and_y(row0[x0], row0[x1],
                                               row1[x0], row1[x1],
                                               0, 0,
                                               0, 0,
                                               interlaced_x_weights_A, wy);

            *colors++ = _mm_cvtsi128_si32(_mm_packus_epi16(scale_by_alpha(A), _mm_setzero_si128()));
        }
    }
#elif 1 && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2

    // TODO(mtklein): clean up this code, use decode_packed_coordinates_and_weight(), etc.

    /*not static*/ inline
    void S32_alpha_D32_filter_DX(const SkBitmapProcState& s,
                                 const uint32_t* xy, int count, uint32_t* colors) {
        SkASSERT(count > 0 && colors != nullptr);
        SkASSERT(s.fFilterQuality != kNone_SkFilterQuality);
        SkASSERT(kN32_SkColorType == s.fPixmap.colorType());
        SkASSERT(s.fAlphaScale <= 256);

        int y0, y1, wy;
        decode_packed_coordinates_and_weight(*xy++, &y0, &y1, &wy);

        auto row0 = (const uint32_t*)( (const char*)s.fPixmap.addr() + y0 * s.fPixmap.rowBytes() ),
             row1 = (const uint32_t*)( (const char*)s.fPixmap.addr() + y1 * s.fPixmap.rowBytes() );

        // We'll put one pixel in the low 4 16-bit lanes to line up with wy,
        // and another in the upper 4 16-bit lanes to line up with 16 - wy.
        const __m128i allY = _mm_unpacklo_epi64(_mm_set1_epi16(   wy),
                                                _mm_set1_epi16(16-wy));
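
        // Lane-wise that's allY = [ wy,wy,wy,wy, 16-wy,16-wy,16-wy,16-wy ],
        // so the low half will weight the row1 pixel and the high half the row0 pixel.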
        while (count --> 0) {
            int x0, x1, wx;
            decode_packed_coordinates_and_weight(*xy++, &x0, &x1, &wx);

            // Load the 4 pixels we're interpolating.
            const __m128i a00 = _mm_cvtsi32_si128(row0[x0]),
                          a01 = _mm_cvtsi32_si128(row0[x1]),
                          a10 = _mm_cvtsi32_si128(row1[x0]),
                          a11 = _mm_cvtsi32_si128(row1[x1]);

            // Line up low-x pixels a00 and a10 with allY.
            __m128i a00a10 = _mm_unpacklo_epi8(_mm_unpacklo_epi32(a10, a00),
                                               _mm_setzero_si128());

            // Scale by allY and 16-wx.
            a00a10 = _mm_mullo_epi16(a00a10, allY);
            a00a10 = _mm_mullo_epi16(a00a10, _mm_set1_epi16(16-wx));

            // Line up high-x pixels a01 and a11 with allY.
            __m128i a01a11 = _mm_unpacklo_epi8(_mm_unpacklo_epi32(a11, a01),
                                               _mm_setzero_si128());

            // Scale by allY and wx.
            a01a11 = _mm_mullo_epi16(a01a11, allY);
            a01a11 = _mm_mullo_epi16(a01a11, _mm_set1_epi16(wx));

            // Add the two intermediates, summing across in one direction.
            __m128i halves = _mm_add_epi16(a00a10, a01a11);

            // Add the two halves to each other to sum in the other direction.
            __m128i sum = _mm_add_epi16(halves, _mm_srli_si128(halves, 8));

            // Get back to [0,255] by dividing by maximum weight 16x16 = 256.
            sum = _mm_srli_epi16(sum, 8);

            if (s.fAlphaScale < 256) {
                // Scale by alpha, which is in [0,256].
                sum = _mm_mullo_epi16(sum, _mm_set1_epi16(s.fAlphaScale));
                sum = _mm_srli_epi16(sum, 8);
            }

            // Pack back into 8-bit values and store.
            *colors++ = _mm_cvtsi128_si32(_mm_packus_epi16(sum, _mm_setzero_si128()));
        }
    }
#else

    // The NEON code only actually differs from the portable code in the
    // filtering step after we've loaded all four pixels we want to bilerp.

    #if defined(SK_ARM_HAS_NEON)
    static void filter_and_scale_by_alpha(unsigned x, unsigned y,
                                          SkPMColor a00, SkPMColor a01,
                                          SkPMColor a10, SkPMColor a11,
                                          SkPMColor *dst,
                                          uint16_t scale) {
        uint8x8_t vy, vconst16_8, v16_y, vres;
        uint16x4_t vx, vconst16_16, v16_x, tmp, vscale;
        uint32x2_t va0, va1;
        uint16x8_t tmp1, tmp2;

        vy = vdup_n_u8(y);                  // duplicate y into vy
        vconst16_8 = vmov_n_u8(16);         // set up constant in vconst16_8
        v16_y = vsub_u8(vconst16_8, vy);    // v16_y = 16-y

        va0 = vdup_n_u32(a00);              // duplicate a00
        va1 = vdup_n_u32(a10);              // duplicate a10
        va0 = vset_lane_u32(a01, va0, 1);   // set top to a01
        va1 = vset_lane_u32(a11, va1, 1);   // set top to a11

        tmp1 = vmull_u8(vreinterpret_u8_u32(va0), v16_y); // tmp1 = [a01|a00] * (16-y)
        tmp2 = vmull_u8(vreinterpret_u8_u32(va1), vy);    // tmp2 = [a11|a10] * y

        vx = vdup_n_u16(x);                 // duplicate x into vx
        vconst16_16 = vmov_n_u16(16);       // set up constant in vconst16_16
        v16_x = vsub_u16(vconst16_16, vx);  // v16_x = 16-x

        tmp = vmul_u16(vget_high_u16(tmp1), vx);        // tmp  = a01 * (16-y) * x
        tmp = vmla_u16(tmp, vget_high_u16(tmp2), vx);   // tmp += a11 *     y  * x
        tmp = vmla_u16(tmp, vget_low_u16(tmp1), v16_x); // tmp += a00 * (16-y) * (16-x)
        tmp = vmla_u16(tmp, vget_low_u16(tmp2), v16_x); // tmp += a10 *     y  * (16-x)
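
        // tmp now holds the full bilerp sum for each channel,
        // scaled up by the total weight 16*16 = 256.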
        if (scale < 256) {
            vscale = vdup_n_u16(scale);         // duplicate scale
            tmp = vshr_n_u16(tmp, 8);           // shift down result by 8
            tmp = vmul_u16(tmp, vscale);        // multiply result by scale
        }

        vres = vshrn_n_u16(vcombine_u16(tmp, vcreate_u16(0)), 8); // shift down result by 8
        vst1_lane_u32(dst, vreinterpret_u32_u8(vres), 0);         // store result
    }
    #else
    static void filter_and_scale_by_alpha(unsigned x, unsigned y,
                                          SkPMColor a00, SkPMColor a01,
                                          SkPMColor a10, SkPMColor a11,
                                          SkPMColor* dstColor,
                                          unsigned alphaScale) {
        SkASSERT((unsigned)x <= 0xF);
        SkASSERT((unsigned)y <= 0xF);
        SkASSERT(alphaScale <= 256);

        int xy = x * y;
        // mask picks out the even bytes (B,R in lo; G,A in hi),
        // giving each channel 8 bits of headroom for the 16-bit weighted sums.
        const uint32_t mask = 0xFF00FF;

        int scale = 256 - 16*y - 16*x + xy;   // (16-x)*(16-y), the weight for a00
        uint32_t lo = (a00 & mask) * scale;
        uint32_t hi = ((a00 >> 8) & mask) * scale;

        scale = 16*x - xy;                    // x*(16-y), the weight for a01
        lo += (a01 & mask) * scale;
        hi += ((a01 >> 8) & mask) * scale;

        scale = 16*y - xy;                    // (16-x)*y, the weight for a10
        lo += (a10 & mask) * scale;
        hi += ((a10 >> 8) & mask) * scale;

        lo += (a11 & mask) * xy;              // x*y, the weight for a11; all four sum to 256
        hi += ((a11 >> 8) & mask) * xy;

        if (alphaScale < 256) {
            lo = ((lo >> 8) & mask) * alphaScale;
            hi = ((hi >> 8) & mask) * alphaScale;
        }

        *dstColor = ((lo >> 8) & mask) | (hi & ~mask);
    }
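
    // As a sanity check (hypothetical weights just to illustrate): with x = y = 8,
    // the pixel center, all four weights are (16-8)*(16-8) = 8*8 = 64, so the four
    // pixels are averaged equally and the weights still total 64*4 = 256.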
    #endif
    /*not static*/ inline
    void S32_alpha_D32_filter_DX(const SkBitmapProcState& s,
                                 const uint32_t* xy, int count, SkPMColor* colors) {
        SkASSERT(count > 0 && colors != nullptr);
        SkASSERT(s.fFilterQuality != kNone_SkFilterQuality);
        SkASSERT(4 == s.fPixmap.info().bytesPerPixel());
        SkASSERT(s.fAlphaScale <= 256);

        int y0, y1, wy;
        decode_packed_coordinates_and_weight(*xy++, &y0, &y1, &wy);

        auto row0 = (const uint32_t*)( (const char*)s.fPixmap.addr() + y0 * s.fPixmap.rowBytes() ),
             row1 = (const uint32_t*)( (const char*)s.fPixmap.addr() + y1 * s.fPixmap.rowBytes() );

        while (count --> 0) {
            int x0, x1, wx;
            decode_packed_coordinates_and_weight(*xy++, &x0, &x1, &wx);

            filter_and_scale_by_alpha(wx, wy,
                                      row0[x0], row0[x1],
                                      row1[x0], row1[x1],
                                      colors++,
                                      s.fAlphaScale);
        }
    }

#endif

}  // namespace SK_OPTS_NS

#endif