SkVMBuilders.cpp

/*
 * Copyright 2019 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "tools/SkVMBuilders.h"

// Some parts of this builder code are written less fluently than possible,
// to avoid any ambiguity of function argument evaluation order.  This lets
// our golden tests work portably.  In general there's no reason to fear
// nesting calls to Builder routines.
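//
// (For example, a nested call like pack(f32_to_byte(r), f32_to_byte(g), 8)
// would be fine semantically, but C++ leaves the order of the two
// f32_to_byte() calls unspecified, so the order in which their instructions
// get appended to the Builder, and thus the golden output, could vary by
// compiler.  Spelling each step out on its own line pins that order down.)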

SrcoverBuilder_F32::SrcoverBuilder_F32(Fmt srcFmt, Fmt dstFmt) {
    auto byte_to_f32 = [&](skvm::I32 byte) {
        skvm::F32 _1_255 = splat(1/255.0f);
        return mul(_1_255, to_f32(byte));
    };

    auto load = [&](Fmt fmt, skvm::F32* r, skvm::F32* g, skvm::F32* b, skvm::F32* a) {
        skvm::Arg ptr;
        switch (fmt) {
            case Fmt::A8: {
                ptr = arg<uint8_t>();
                *r = *g = *b = splat(0.0f);
                *a = byte_to_f32(load8(ptr));
            } break;

            case Fmt::G8: {
                ptr = arg<uint8_t>();
                *r = *g = *b = byte_to_f32(load8(ptr));
                *a = splat(1.0f);
            } break;

            case Fmt::RGBA_8888: {
                ptr = arg<int>();
                skvm::I32 rgba = load32(ptr);
                *r = byte_to_f32(extract(rgba,  0, splat(0xff)));
                *g = byte_to_f32(extract(rgba,  8, splat(0xff)));
                *b = byte_to_f32(extract(rgba, 16, splat(0xff)));
                *a = byte_to_f32(extract(rgba, 24, splat(0xff)));
            } break;
        }
        return ptr;
    };

    skvm::F32 r,g,b,a;
    (void)load(srcFmt, &r,&g,&b,&a);

    skvm::F32 dr,dg,db,da;
    skvm::Arg dst = load(dstFmt, &dr,&dg,&db,&da);
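
    // Premultiplied src-over: out = src + dst*(1 - srcA), applied per channel.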
    skvm::F32 invA = sub(splat(1.0f), a);
    r = mad(dr, invA, r);
    g = mad(dg, invA, g);
    b = mad(db, invA, b);
    a = mad(da, invA, a);

    auto f32_to_byte = [&](skvm::F32 f32) {
        skvm::F32 _255 = splat(255.0f),
                  _0_5 = splat(0.5f);
        return to_i32(mad(f32, _255, _0_5));
    };

    switch (dstFmt) {
        case Fmt::A8: {
            store8(dst, f32_to_byte(a));
        } break;
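
        // G8 stores a single luma byte; the weights below are the BT.709
        // luma coefficients, so the store is just a weighted sum of r,g,b.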
        case Fmt::G8: {
            skvm::F32 _2126 = splat(0.2126f),
                      _7152 = splat(0.7152f),
                      _0722 = splat(0.0722f);
            store8(dst, f32_to_byte(mad(r, _2126,
                                    mad(g, _7152,
                                    mul(b, _0722)))));
        } break;

        case Fmt::RGBA_8888: {
            skvm::I32 R = f32_to_byte(r),
                      G = f32_to_byte(g),
                      B = f32_to_byte(b),
                      A = f32_to_byte(a);

            R = pack(R, G,  8);
            B = pack(B, A,  8);
            R = pack(R, B, 16);

            store32(dst, R);
        } break;
    }
}

SrcoverBuilder_I32_Naive::SrcoverBuilder_I32_Naive() {
    skvm::Arg src = arg<int>(),
              dst = arg<int>();

    auto load = [&](skvm::Arg ptr,
                    skvm::I32* r, skvm::I32* g, skvm::I32* b, skvm::I32* a) {
        skvm::I32 rgba = load32(ptr);
        *r = extract(rgba,  0, splat(0xff));
        *g = extract(rgba,  8, splat(0xff));
        *b = extract(rgba, 16, splat(0xff));
        *a = extract(rgba, 24, splat(0xff));
    };

    skvm::I32 r,g,b,a;
    load(src, &r,&g,&b,&a);

    skvm::I32 dr,dg,db,da;
    load(dst, &dr,&dg,&db,&da);

    // (xy + x)/256 is a good approximation of (xy + 127)/255
    //
    //   == (d*(255-a) + d)/256
    //   == (d*(255-a+1)  )/256
    //   == (d*(256-a  )  )/256
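    //
    // For example, with d=128 and a=128 the exact form (128*127 + 127)/255
    // and the approximation (128*(256-128))/256 both give 64; with d=200
    // and a=100 they differ by just one (122 vs. 121).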
    skvm::I32 invA = sub(splat(256), a);
    r = add(r, shr(mul(dr, invA), 8));
    g = add(g, shr(mul(dg, invA), 8));
    b = add(b, shr(mul(db, invA), 8));
    a = add(a, shr(mul(da, invA), 8));

    r = pack(r, g,  8);
    b = pack(b, a,  8);
    r = pack(r, b, 16);
    store32(dst, r);
}

SrcoverBuilder_I32::SrcoverBuilder_I32() {
    skvm::Arg src = arg<int>(),
              dst = arg<int>();

    auto load = [&](skvm::Arg ptr,
                    skvm::I32* r, skvm::I32* g, skvm::I32* b, skvm::I32* a) {
        skvm::I32 rgba = load32(ptr);
        *r = bit_and(rgba, splat(0xff));
        *g = bytes  (rgba, 0x0002);
        *b = bytes  (rgba, 0x0003);
        *a = shr    (rgba, 24);
    };
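    // (Each nibble of the bytes() control word picks a source byte, counted
    // from 1 at the low end, with 0 producing zero, so 0x0002 pulls g down
    // into the low byte and 0x0003 does the same for b.)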

    skvm::I32 r,g,b,a;
    load(src, &r,&g,&b,&a);

    skvm::I32 dr,dg,db,da;
    load(dst, &dr,&dg,&db,&da);

    // (xy + x)/256 is a good approximation of (xy + 127)/255
    //
    //   == (d*(255-a) + d)/256
    //   == (d*(255-a+1)  )/256
    //   == (d*(256-a  )  )/256
    //
    // We're doing 8x8 bit multiplies in 32-bit lanes.
    // Since the inputs and results both fit in 16 bits,
    // we can use mul_16x2, which tends to be faster than mul.
    //
    // (The top 2 zero bytes of the inputs will also multiply
    // with each other to produce zero... perfect.)
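    //
    // E.g. r = 0x000000c8 (200) and invA = 0x0000009c (156): mul_16x2
    // multiplies the 16-bit halves independently, 0x0000*0x0000 up top and
    // 0x00c8*0x009c = 0x79e0 below, giving 0x000079e0; shifting right by 8
    // leaves 121, the same answer mul would give.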
    skvm::I32 invA = sub(splat(256), a);
    r = add(r, shr(mul_16x2(dr, invA), 8));
    g = add(g, shr(mul_16x2(dg, invA), 8));
    b = add(b, shr(mul_16x2(db, invA), 8));
    a = add(a, shr(mul_16x2(da, invA), 8));

    r = pack(r, g,  8);
    b = pack(b, a,  8);
    r = pack(r, b, 16);
    store32(dst, r);
}

SrcoverBuilder_I32_SWAR::SrcoverBuilder_I32_SWAR() {
    skvm::Arg src = arg<int>(),
              dst = arg<int>();

    // The s += d*invA adds won't overflow,
    // so we don't have to unpack s beyond grabbing the alpha channel.
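    // (With premultiplied color each src channel is at most a, and
    // (d*(256-a))>>8 is at most 255-a, so each per-byte sum stays <= 255
    // and never carries into the neighboring channel.)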
    skvm::I32 s   = load32(src),
              ax2 = bytes(s, 0x0404);   // rgba -> a0a0

    // We'll use the same approximation math as above, this time making sure to
    // use both i16 multiplies to our benefit, one for r/b, the other for g/a.
    skvm::I32 invAx2 = sub_16x2(splat(0x01000100), ax2);

    skvm::I32 d  = load32(dst),
              rb = bit_and (d, splat(0x00ff00ff)),
              ga = shr_16x2(d, 8);

    rb = shr_16x2(mul_16x2(rb, invAx2), 8);  // Put the high 8 bits back in the low lane.
    ga =          mul_16x2(ga, invAx2);      // Keep the high 8 bits up high...
    ga = bit_clear(ga, splat(0x00ff00ff));   // ...and mask off the low bits.
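
    // rb now holds the r and b contributions in the low byte of each 16-bit
    // half, while ga holds g and a in the high bytes, so one bit_or puts all
    // four channels back in rgba order before adding them to s.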
    store32(dst, add(s, bit_or(rb, ga)));
}