/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkSwizzler_opts_DEFINED
#define SkSwizzler_opts_DEFINED

#include "include/private/SkColorData.h"
#include <utility>

#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    #include <immintrin.h>
#elif defined(SK_ARM_HAS_NEON)
    #include <arm_neon.h>
#endif

namespace SK_OPTS_NS {
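
// Throughout this file, (x*a+127)/255 is a correctly rounded divide by 255 of
// the 16-bit product x*a. For example, premultiplying g=128 by a=128 gives
// (128*128+127)/255 = 16511/255 = 64, matching round(128*128/255.0) = 64.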

static void RGBA_to_rgbA_portable(uint32_t* dst, const uint32_t* src, int count) {
    for (int i = 0; i < count; i++) {
        uint8_t a = (src[i] >> 24) & 0xFF,
                b = (src[i] >> 16) & 0xFF,
                g = (src[i] >>  8) & 0xFF,
                r = (src[i] >>  0) & 0xFF;
        b = (b*a+127)/255;
        g = (g*a+127)/255;
        r = (r*a+127)/255;
        dst[i] = (uint32_t)a << 24
               | (uint32_t)b << 16
               | (uint32_t)g <<  8
               | (uint32_t)r <<  0;
    }
}

static void RGBA_to_bgrA_portable(uint32_t* dst, const uint32_t* src, int count) {
    for (int i = 0; i < count; i++) {
        uint8_t a = (src[i] >> 24) & 0xFF,
                b = (src[i] >> 16) & 0xFF,
                g = (src[i] >>  8) & 0xFF,
                r = (src[i] >>  0) & 0xFF;
        b = (b*a+127)/255;
        g = (g*a+127)/255;
        r = (r*a+127)/255;
        dst[i] = (uint32_t)a << 24
               | (uint32_t)r << 16
               | (uint32_t)g <<  8
               | (uint32_t)b <<  0;
    }
}

static void RGBA_to_BGRA_portable(uint32_t* dst, const uint32_t* src, int count) {
    for (int i = 0; i < count; i++) {
        uint8_t a = (src[i] >> 24) & 0xFF,
                b = (src[i] >> 16) & 0xFF,
                g = (src[i] >>  8) & 0xFF,
                r = (src[i] >>  0) & 0xFF;
        dst[i] = (uint32_t)a << 24
               | (uint32_t)r << 16
               | (uint32_t)g <<  8
               | (uint32_t)b <<  0;
    }
}

static void RGB_to_RGB1_portable(uint32_t dst[], const uint8_t* src, int count) {
    for (int i = 0; i < count; i++) {
        uint8_t r = src[0],
                g = src[1],
                b = src[2];
        src += 3;
        dst[i] = (uint32_t)0xFF << 24
               | (uint32_t)b    << 16
               | (uint32_t)g    <<  8
               | (uint32_t)r    <<  0;
    }
}

static void RGB_to_BGR1_portable(uint32_t dst[], const uint8_t* src, int count) {
    for (int i = 0; i < count; i++) {
        uint8_t r = src[0],
                g = src[1],
                b = src[2];
        src += 3;
        dst[i] = (uint32_t)0xFF << 24
               | (uint32_t)r    << 16
               | (uint32_t)g    <<  8
               | (uint32_t)b    <<  0;
    }
}

static void gray_to_RGB1_portable(uint32_t dst[], const uint8_t* src, int count) {
    for (int i = 0; i < count; i++) {
        dst[i] = (uint32_t)0xFF    << 24
               | (uint32_t)src[i] << 16
               | (uint32_t)src[i] <<  8
               | (uint32_t)src[i] <<  0;
    }
}

static void grayA_to_RGBA_portable(uint32_t dst[], const uint8_t* src, int count) {
    for (int i = 0; i < count; i++) {
        uint8_t g = src[0],
                a = src[1];
        src += 2;
        dst[i] = (uint32_t)a << 24
               | (uint32_t)g << 16
               | (uint32_t)g <<  8
               | (uint32_t)g <<  0;
    }
}

static void grayA_to_rgbA_portable(uint32_t dst[], const uint8_t* src, int count) {
    for (int i = 0; i < count; i++) {
        uint8_t g = src[0],
                a = src[1];
        src += 2;
        g = (g*a+127)/255;
        dst[i] = (uint32_t)a << 24
               | (uint32_t)g << 16
               | (uint32_t)g <<  8
               | (uint32_t)g <<  0;
    }
}
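
// The sources handled below store inverted CMYK (each byte is 255 minus its
// nominal value), as Adobe-style JPEGs do. Writing c,m,y,k for the inverted
// bytes, the usual approximation R = (255-C)*(255-K)/255 becomes the rounded
// byte multiply r = (c*k+127)/255 used here; SkSwizzler.cpp has the details.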
static void inverted_CMYK_to_RGB1_portable(uint32_t* dst, const uint32_t* src, int count) {
    for (int i = 0; i < count; i++) {
        uint8_t k = (src[i] >> 24) & 0xFF,
                y = (src[i] >> 16) & 0xFF,
                m = (src[i] >>  8) & 0xFF,
                c = (src[i] >>  0) & 0xFF;
        // See comments in SkSwizzler.cpp for details on the conversion formula.
        uint8_t b = (y*k+127)/255,
                g = (m*k+127)/255,
                r = (c*k+127)/255;
        dst[i] = (uint32_t)0xFF << 24
               | (uint32_t) b   << 16
               | (uint32_t) g   <<  8
               | (uint32_t) r   <<  0;
    }
}

static void inverted_CMYK_to_BGR1_portable(uint32_t* dst, const uint32_t* src, int count) {
    for (int i = 0; i < count; i++) {
        uint8_t k = (src[i] >> 24) & 0xFF,
                y = (src[i] >> 16) & 0xFF,
                m = (src[i] >>  8) & 0xFF,
                c = (src[i] >>  0) & 0xFF;
        uint8_t b = (y*k+127)/255,
                g = (m*k+127)/255,
                r = (c*k+127)/255;
        dst[i] = (uint32_t)0xFF << 24
               | (uint32_t) r   << 16
               | (uint32_t) g   <<  8
               | (uint32_t) b   <<  0;
    }
}

#if defined(SK_ARM_HAS_NEON)

// Rounded divide by 255, (x + 127) / 255
static uint8x8_t div255_round(uint16x8_t x) {
    // result = (x + 127) / 255
    // result = (x + 127) / 256 + error1
    //
    // error1 = (x + 127) / (255 * 256)
    // error1 = (x + 127) / (256 * 256) + error2
    //
    // error2 = (x + 127) / (255 * 256 * 256)
    //
    // The maximum value of error2 is too small to matter.  Thus:
    // result = (x + 127) / 256 + (x + 127) / (256 * 256)
    // result = ((x + 127) / 256 + x + 127) / 256
    // result = ((x + 127) >> 8 + x + 127) >> 8
    //
    // Use >>> to represent "rounded right shift" which, conveniently,
    // NEON supports in one instruction.
    // result = ((x >>> 8) + x) >>> 8
    //
    // Note that the second right shift is actually performed as an
    // "add, round, and narrow back to 8-bits" instruction.
    return vraddhn_u16(x, vrshrq_n_u16(x, 8));
}
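
// Checking the approximation at the top of the range: for x = 255*255 = 65025,
// the exact value is (65025+127)/255 = 255, and the two rounding shifts give
// (65025 >>> 8) = 254, then (65025 + 254) >>> 8 = 65279 >>> 8 = 255. They agree.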

// Scale a byte by another, (x * y + 127) / 255
static uint8x8_t scale(uint8x8_t x, uint8x8_t y) {
    return div255_round(vmull_u8(x, y));
}

template <bool kSwapRB>
static void premul_should_swapRB(uint32_t* dst, const uint32_t* src, int count) {
    while (count >= 8) {
        // Load 8 pixels.
        uint8x8x4_t rgba = vld4_u8((const uint8_t*) src);

        uint8x8_t a = rgba.val[3],
                  b = rgba.val[2],
                  g = rgba.val[1],
                  r = rgba.val[0];

        // Premultiply.
        b = scale(b, a);
        g = scale(g, a);
        r = scale(r, a);

        // Store 8 premultiplied pixels.
        if (kSwapRB) {
            rgba.val[2] = r;
            rgba.val[1] = g;
            rgba.val[0] = b;
        } else {
            rgba.val[2] = b;
            rgba.val[1] = g;
            rgba.val[0] = r;
        }
        vst4_u8((uint8_t*) dst, rgba);
        src += 8;
        dst += 8;
        count -= 8;
    }

    // Call portable code to finish up the tail of [0,8) pixels.
    auto proc = kSwapRB ? RGBA_to_bgrA_portable : RGBA_to_rgbA_portable;
    proc(dst, src, count);
}

/*not static*/ inline void RGBA_to_rgbA(uint32_t* dst, const uint32_t* src, int count) {
    premul_should_swapRB<false>(dst, src, count);
}

/*not static*/ inline void RGBA_to_bgrA(uint32_t* dst, const uint32_t* src, int count) {
    premul_should_swapRB<true>(dst, src, count);
}

/*not static*/ inline void RGBA_to_BGRA(uint32_t* dst, const uint32_t* src, int count) {
    using std::swap;
    while (count >= 16) {
        // Load 16 pixels.
        uint8x16x4_t rgba = vld4q_u8((const uint8_t*) src);

        // Swap r and b.
        swap(rgba.val[0], rgba.val[2]);

        // Store 16 pixels.
        vst4q_u8((uint8_t*) dst, rgba);
        src += 16;
        dst += 16;
        count -= 16;
    }

    if (count >= 8) {
        // Load 8 pixels.
        uint8x8x4_t rgba = vld4_u8((const uint8_t*) src);

        // Swap r and b.
        swap(rgba.val[0], rgba.val[2]);

        // Store 8 pixels.
        vst4_u8((uint8_t*) dst, rgba);
        src += 8;
        dst += 8;
        count -= 8;
    }

    RGBA_to_BGRA_portable(dst, src, count);
}

template <bool kSwapRB>
static void insert_alpha_should_swaprb(uint32_t dst[], const uint8_t* src, int count) {
    while (count >= 16) {
        // Load 16 pixels.
        uint8x16x3_t rgb = vld3q_u8(src);

        // Insert an opaque alpha channel and swap if needed.
        uint8x16x4_t rgba;
        if (kSwapRB) {
            rgba.val[0] = rgb.val[2];
            rgba.val[2] = rgb.val[0];
        } else {
            rgba.val[0] = rgb.val[0];
            rgba.val[2] = rgb.val[2];
        }
        rgba.val[1] = rgb.val[1];
        rgba.val[3] = vdupq_n_u8(0xFF);

        // Store 16 pixels.
        vst4q_u8((uint8_t*) dst, rgba);
        src += 16*3;
        dst += 16;
        count -= 16;
    }

    if (count >= 8) {
        // Load 8 pixels.
        uint8x8x3_t rgb = vld3_u8(src);

        // Insert an opaque alpha channel and swap if needed.
        uint8x8x4_t rgba;
        if (kSwapRB) {
            rgba.val[0] = rgb.val[2];
            rgba.val[2] = rgb.val[0];
        } else {
            rgba.val[0] = rgb.val[0];
            rgba.val[2] = rgb.val[2];
        }
        rgba.val[1] = rgb.val[1];
        rgba.val[3] = vdup_n_u8(0xFF);

        // Store 8 pixels.
        vst4_u8((uint8_t*) dst, rgba);
        src += 8*3;
        dst += 8;
        count -= 8;
    }

    // Call portable code to finish up the tail of [0,8) pixels.
    auto proc = kSwapRB ? RGB_to_BGR1_portable : RGB_to_RGB1_portable;
    proc(dst, src, count);
}

/*not static*/ inline void RGB_to_RGB1(uint32_t dst[], const uint8_t* src, int count) {
    insert_alpha_should_swaprb<false>(dst, src, count);
}

/*not static*/ inline void RGB_to_BGR1(uint32_t dst[], const uint8_t* src, int count) {
    insert_alpha_should_swaprb<true>(dst, src, count);
}

/*not static*/ inline void gray_to_RGB1(uint32_t dst[], const uint8_t* src, int count) {
    while (count >= 16) {
        // Load 16 pixels.
        uint8x16_t gray = vld1q_u8(src);

        // Set each of the color channels.
        uint8x16x4_t rgba;
        rgba.val[0] = gray;
        rgba.val[1] = gray;
        rgba.val[2] = gray;
        rgba.val[3] = vdupq_n_u8(0xFF);

        // Store 16 pixels.
        vst4q_u8((uint8_t*) dst, rgba);
        src += 16;
        dst += 16;
        count -= 16;
    }

    if (count >= 8) {
        // Load 8 pixels.
        uint8x8_t gray = vld1_u8(src);

        // Set each of the color channels.
        uint8x8x4_t rgba;
        rgba.val[0] = gray;
        rgba.val[1] = gray;
        rgba.val[2] = gray;
        rgba.val[3] = vdup_n_u8(0xFF);

        // Store 8 pixels.
        vst4_u8((uint8_t*) dst, rgba);
        src += 8;
        dst += 8;
        count -= 8;
    }

    gray_to_RGB1_portable(dst, src, count);
}

template <bool kPremul>
static void expand_grayA(uint32_t dst[], const uint8_t* src, int count) {
    while (count >= 16) {
        // Load 16 pixels.
        uint8x16x2_t ga = vld2q_u8(src);

        // Premultiply if requested.
        if (kPremul) {
            ga.val[0] = vcombine_u8(
                    scale(vget_low_u8(ga.val[0]),  vget_low_u8(ga.val[1])),
                    scale(vget_high_u8(ga.val[0]), vget_high_u8(ga.val[1])));
        }

        // Set each of the color channels.
        uint8x16x4_t rgba;
        rgba.val[0] = ga.val[0];
        rgba.val[1] = ga.val[0];
        rgba.val[2] = ga.val[0];
        rgba.val[3] = ga.val[1];

        // Store 16 pixels.
        vst4q_u8((uint8_t*) dst, rgba);
        src += 16*2;
        dst += 16;
        count -= 16;
    }

    if (count >= 8) {
        // Load 8 pixels.
        uint8x8x2_t ga = vld2_u8(src);

        // Premultiply if requested.
        if (kPremul) {
            ga.val[0] = scale(ga.val[0], ga.val[1]);
        }

        // Set each of the color channels.
        uint8x8x4_t rgba;
        rgba.val[0] = ga.val[0];
        rgba.val[1] = ga.val[0];
        rgba.val[2] = ga.val[0];
        rgba.val[3] = ga.val[1];

        // Store 8 pixels.
        vst4_u8((uint8_t*) dst, rgba);
        src += 8*2;
        dst += 8;
        count -= 8;
    }

    auto proc = kPremul ? grayA_to_rgbA_portable : grayA_to_RGBA_portable;
    proc(dst, src, count);
}

/*not static*/ inline void grayA_to_RGBA(uint32_t dst[], const uint8_t* src, int count) {
    expand_grayA<false>(dst, src, count);
}

/*not static*/ inline void grayA_to_rgbA(uint32_t dst[], const uint8_t* src, int count) {
    expand_grayA<true>(dst, src, count);
}

enum Format { kRGB1, kBGR1 };

template <Format format>
static void inverted_cmyk_to(uint32_t* dst, const uint32_t* src, int count) {
    while (count >= 8) {
        // Load 8 cmyk pixels.
        uint8x8x4_t pixels = vld4_u8((const uint8_t*) src);

        uint8x8_t k = pixels.val[3],
                  y = pixels.val[2],
                  m = pixels.val[1],
                  c = pixels.val[0];

        // Scale to r, g, b.
        uint8x8_t b = scale(y, k);
        uint8x8_t g = scale(m, k);
        uint8x8_t r = scale(c, k);

        // Store 8 rgba pixels.
        if (kBGR1 == format) {
            pixels.val[3] = vdup_n_u8(0xFF);
            pixels.val[2] = r;
            pixels.val[1] = g;
            pixels.val[0] = b;
        } else {
            pixels.val[3] = vdup_n_u8(0xFF);
            pixels.val[2] = b;
            pixels.val[1] = g;
            pixels.val[0] = r;
        }
        vst4_u8((uint8_t*) dst, pixels);
        src += 8;
        dst += 8;
        count -= 8;
    }

    auto proc = (kBGR1 == format) ? inverted_CMYK_to_BGR1_portable : inverted_CMYK_to_RGB1_portable;
    proc(dst, src, count);
}

/*not static*/ inline void inverted_CMYK_to_RGB1(uint32_t dst[], const uint32_t* src, int count) {
    inverted_cmyk_to<kRGB1>(dst, src, count);
}

/*not static*/ inline void inverted_CMYK_to_BGR1(uint32_t dst[], const uint32_t* src, int count) {
    inverted_cmyk_to<kBGR1>(dst, src, count);
}

#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3

// Scale a byte by another.
// Inputs are stored in 16-bit lanes, but are not larger than 8-bits.
static __m128i scale(__m128i x, __m128i y) {
    const __m128i _128 = _mm_set1_epi16(128);
    const __m128i _257 = _mm_set1_epi16(257);

    // (x+127)/255 == ((x+128)*257)>>16 for 0 <= x <= 255*255.
    return _mm_mulhi_epu16(_mm_add_epi16(_mm_mullo_epi16(x, y), _128), _257);
}
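
// A compile-time spot check of the identity above, at both ends of the product
// range and at a case that rounds up:
static_assert((    0 + 127)/255 == ((    0 + 128)*257) >> 16, "");
static_assert((  128 + 127)/255 == ((  128 + 128)*257) >> 16, "");
static_assert((65025 + 127)/255 == ((65025 + 128)*257) >> 16, "");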

template <bool kSwapRB>
static void premul_should_swapRB(uint32_t* dst, const uint32_t* src, int count) {

    auto premul8 = [](__m128i* lo, __m128i* hi) {
        const __m128i zeros = _mm_setzero_si128();
        __m128i planar;
        if (kSwapRB) {
            planar = _mm_setr_epi8(2,6,10,14, 1,5,9,13, 0,4,8,12, 3,7,11,15);
        } else {
            planar = _mm_setr_epi8(0,4,8,12, 1,5,9,13, 2,6,10,14, 3,7,11,15);
        }

        // Swizzle the pixels to 8-bit planar.
        *lo = _mm_shuffle_epi8(*lo, planar);          // rrrrgggg bbbbaaaa
        *hi = _mm_shuffle_epi8(*hi, planar);          // RRRRGGGG BBBBAAAA
        __m128i rg = _mm_unpacklo_epi32(*lo, *hi),    // rrrrRRRR ggggGGGG
                ba = _mm_unpackhi_epi32(*lo, *hi);    // bbbbBBBB aaaaAAAA

        // Unpack to 16-bit planar.
        __m128i r = _mm_unpacklo_epi8(rg, zeros),     // r_r_r_r_ R_R_R_R_
                g = _mm_unpackhi_epi8(rg, zeros),     // g_g_g_g_ G_G_G_G_
                b = _mm_unpacklo_epi8(ba, zeros),     // b_b_b_b_ B_B_B_B_
                a = _mm_unpackhi_epi8(ba, zeros);     // a_a_a_a_ A_A_A_A_

        // Premultiply!
        r = scale(r, a);
        g = scale(g, a);
        b = scale(b, a);

        // Repack into interlaced pixels.
        rg = _mm_or_si128(r, _mm_slli_epi16(g, 8));   // rgrgrgrg RGRGRGRG
        ba = _mm_or_si128(b, _mm_slli_epi16(a, 8));   // babababa BABABABA
        *lo = _mm_unpacklo_epi16(rg, ba);             // rgbargba rgbargba
        *hi = _mm_unpackhi_epi16(rg, ba);             // RGBARGBA RGBARGBA
    };

    while (count >= 8) {
        __m128i lo = _mm_loadu_si128((const __m128i*) (src + 0)),
                hi = _mm_loadu_si128((const __m128i*) (src + 4));

        premul8(&lo, &hi);

        _mm_storeu_si128((__m128i*) (dst + 0), lo);
        _mm_storeu_si128((__m128i*) (dst + 4), hi);

        src += 8;
        dst += 8;
        count -= 8;
    }

    if (count >= 4) {
        __m128i lo = _mm_loadu_si128((const __m128i*) src),
                hi = _mm_setzero_si128();

        premul8(&lo, &hi);

        _mm_storeu_si128((__m128i*) dst, lo);

        src += 4;
        dst += 4;
        count -= 4;
    }

    // Call portable code to finish up the tail of [0,4) pixels.
    auto proc = kSwapRB ? RGBA_to_bgrA_portable : RGBA_to_rgbA_portable;
    proc(dst, src, count);
}

/*not static*/ inline void RGBA_to_rgbA(uint32_t* dst, const uint32_t* src, int count) {
    premul_should_swapRB<false>(dst, src, count);
}

/*not static*/ inline void RGBA_to_bgrA(uint32_t* dst, const uint32_t* src, int count) {
    premul_should_swapRB<true>(dst, src, count);
}

/*not static*/ inline void RGBA_to_BGRA(uint32_t* dst, const uint32_t* src, int count) {
    const __m128i swapRB = _mm_setr_epi8(2,1,0,3, 6,5,4,7, 10,9,8,11, 14,13,12,15);
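    // Within each 4-byte pixel, this shuffle maps bytes (0,1,2,3) to (2,1,0,3),
    // swapping R and B while leaving G and A in place.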
    while (count >= 4) {
        __m128i rgba = _mm_loadu_si128((const __m128i*) src);
        __m128i bgra = _mm_shuffle_epi8(rgba, swapRB);
        _mm_storeu_si128((__m128i*) dst, bgra);

        src += 4;
        dst += 4;
        count -= 4;
    }

    RGBA_to_BGRA_portable(dst, src, count);
}

template <bool kSwapRB>
static void insert_alpha_should_swaprb(uint32_t dst[], const uint8_t* src, int count) {
    const __m128i alphaMask = _mm_set1_epi32(0xFF000000);
    __m128i expand;
    const uint8_t X = 0xFF; // Used as a placeholder.  The value of X is irrelevant.
    if (kSwapRB) {
        expand = _mm_setr_epi8(2,1,0,X, 5,4,3,X, 8,7,6,X, 11,10,9,X);
    } else {
        expand = _mm_setr_epi8(0,1,2,X, 3,4,5,X, 6,7,8,X, 9,10,11,X);
    }

    while (count >= 6) {
        // Load a vector.  While this actually contains 5 pixels plus an
        // extra component, we will discard all but the first four pixels on
        // this iteration.
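        // (Requiring count >= 6 rather than 4 guarantees at least 18 bytes
        // remain at src, so this 16-byte load cannot read past the end.)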
        __m128i rgb = _mm_loadu_si128((const __m128i*) src);

        // Expand the first four pixels to RGBX and then mask to RGB(FF).
        __m128i rgba = _mm_or_si128(_mm_shuffle_epi8(rgb, expand), alphaMask);

        // Store 4 pixels.
        _mm_storeu_si128((__m128i*) dst, rgba);

        src += 4*3;
        dst += 4;
        count -= 4;
    }

    // Call portable code to finish up the tail of [0,4) pixels.
    auto proc = kSwapRB ? RGB_to_BGR1_portable : RGB_to_RGB1_portable;
    proc(dst, src, count);
}

/*not static*/ inline void RGB_to_RGB1(uint32_t dst[], const uint8_t* src, int count) {
    insert_alpha_should_swaprb<false>(dst, src, count);
}

/*not static*/ inline void RGB_to_BGR1(uint32_t dst[], const uint8_t* src, int count) {
    insert_alpha_should_swaprb<true>(dst, src, count);
}

/*not static*/ inline void gray_to_RGB1(uint32_t dst[], const uint8_t* src, int count) {
    const __m128i alphas = _mm_set1_epi8((uint8_t) 0xFF);
    while (count >= 16) {
        __m128i grays = _mm_loadu_si128((const __m128i*) src);

        __m128i gg_lo = _mm_unpacklo_epi8(grays, grays);
        __m128i gg_hi = _mm_unpackhi_epi8(grays, grays);
        __m128i ga_lo = _mm_unpacklo_epi8(grays, alphas);
        __m128i ga_hi = _mm_unpackhi_epi8(grays, alphas);

        __m128i ggga0 = _mm_unpacklo_epi16(gg_lo, ga_lo);
        __m128i ggga1 = _mm_unpackhi_epi16(gg_lo, ga_lo);
        __m128i ggga2 = _mm_unpacklo_epi16(gg_hi, ga_hi);
        __m128i ggga3 = _mm_unpackhi_epi16(gg_hi, ga_hi);

        _mm_storeu_si128((__m128i*) (dst +  0), ggga0);
        _mm_storeu_si128((__m128i*) (dst +  4), ggga1);
        _mm_storeu_si128((__m128i*) (dst +  8), ggga2);
        _mm_storeu_si128((__m128i*) (dst + 12), ggga3);

        src += 16;
        dst += 16;
        count -= 16;
    }

    gray_to_RGB1_portable(dst, src, count);
}
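
// In the two functions below, each 16-bit lane of a loaded vector holds one
// G,A pair (G in the low byte). gg duplicates the gray byte into both halves
// of each lane while ga keeps the G,A pair, so interleaving the 16-bit lanes
// of gg with ga emits G,G,G,A per 32-bit destination pixel.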
/*not static*/ inline void grayA_to_RGBA(uint32_t dst[], const uint8_t* src, int count) {
    while (count >= 8) {
        __m128i ga = _mm_loadu_si128((const __m128i*) src);

        __m128i gg = _mm_or_si128(_mm_and_si128(ga, _mm_set1_epi16(0x00FF)),
                                  _mm_slli_epi16(ga, 8));

        __m128i ggga_lo = _mm_unpacklo_epi16(gg, ga);
        __m128i ggga_hi = _mm_unpackhi_epi16(gg, ga);

        _mm_storeu_si128((__m128i*) (dst + 0), ggga_lo);
        _mm_storeu_si128((__m128i*) (dst + 4), ggga_hi);

        src += 8*2;
        dst += 8;
        count -= 8;
    }

    grayA_to_RGBA_portable(dst, src, count);
}

/*not static*/ inline void grayA_to_rgbA(uint32_t dst[], const uint8_t* src, int count) {
    while (count >= 8) {
        __m128i grayA = _mm_loadu_si128((const __m128i*) src);

        __m128i g0 = _mm_and_si128(grayA, _mm_set1_epi16(0x00FF));
        __m128i a0 = _mm_srli_epi16(grayA, 8);

        // Premultiply
        g0 = scale(g0, a0);

        __m128i gg = _mm_or_si128(g0, _mm_slli_epi16(g0, 8));
        __m128i ga = _mm_or_si128(g0, _mm_slli_epi16(a0, 8));

        __m128i ggga_lo = _mm_unpacklo_epi16(gg, ga);
        __m128i ggga_hi = _mm_unpackhi_epi16(gg, ga);

        _mm_storeu_si128((__m128i*) (dst + 0), ggga_lo);
        _mm_storeu_si128((__m128i*) (dst + 4), ggga_hi);

        src += 8*2;
        dst += 8;
        count -= 8;
    }

    grayA_to_rgbA_portable(dst, src, count);
}

enum Format { kRGB1, kBGR1 };

template <Format format>
static void inverted_cmyk_to(uint32_t* dst, const uint32_t* src, int count) {

    auto convert8 = [](__m128i* lo, __m128i* hi) {
        const __m128i zeros = _mm_setzero_si128();
        __m128i planar;
        if (kBGR1 == format) {
            planar = _mm_setr_epi8(2,6,10,14, 1,5,9,13, 0,4,8,12, 3,7,11,15);
        } else {
            planar = _mm_setr_epi8(0,4,8,12, 1,5,9,13, 2,6,10,14, 3,7,11,15);
        }

        // Swizzle the pixels to 8-bit planar.
        *lo = _mm_shuffle_epi8(*lo, planar);          // ccccmmmm yyyykkkk
        *hi = _mm_shuffle_epi8(*hi, planar);          // CCCCMMMM YYYYKKKK
        __m128i cm = _mm_unpacklo_epi32(*lo, *hi),    // ccccCCCC mmmmMMMM
                yk = _mm_unpackhi_epi32(*lo, *hi);    // yyyyYYYY kkkkKKKK

        // Unpack to 16-bit planar.
        __m128i c = _mm_unpacklo_epi8(cm, zeros),     // c_c_c_c_ C_C_C_C_
                m = _mm_unpackhi_epi8(cm, zeros),     // m_m_m_m_ M_M_M_M_
                y = _mm_unpacklo_epi8(yk, zeros),     // y_y_y_y_ Y_Y_Y_Y_
                k = _mm_unpackhi_epi8(yk, zeros);     // k_k_k_k_ K_K_K_K_

        // Scale to r, g, b.
        __m128i r = scale(c, k),
                g = scale(m, k),
                b = scale(y, k);

        // Repack into interlaced pixels.
        __m128i rg = _mm_or_si128(r, _mm_slli_epi16(g, 8)),              // rgrgrgrg RGRGRGRG
                ba = _mm_or_si128(b, _mm_set1_epi16((uint16_t) 0xFF00)); // b1b1b1b1 B1B1B1B1
        *lo = _mm_unpacklo_epi16(rg, ba);                                // rgb1rgb1 rgb1rgb1
        *hi = _mm_unpackhi_epi16(rg, ba);                                // RGB1RGB1 RGB1RGB1
    };

    while (count >= 8) {
        __m128i lo = _mm_loadu_si128((const __m128i*) (src + 0)),
                hi = _mm_loadu_si128((const __m128i*) (src + 4));

        convert8(&lo, &hi);

        _mm_storeu_si128((__m128i*) (dst + 0), lo);
        _mm_storeu_si128((__m128i*) (dst + 4), hi);

        src += 8;
        dst += 8;
        count -= 8;
    }

    if (count >= 4) {
        __m128i lo = _mm_loadu_si128((const __m128i*) src),
                hi = _mm_setzero_si128();

        convert8(&lo, &hi);

        _mm_storeu_si128((__m128i*) dst, lo);

        src += 4;
        dst += 4;
        count -= 4;
    }

    auto proc = (kBGR1 == format) ? inverted_CMYK_to_BGR1_portable : inverted_CMYK_to_RGB1_portable;
    proc(dst, src, count);
}

/*not static*/ inline void inverted_CMYK_to_RGB1(uint32_t dst[], const uint32_t* src, int count) {
    inverted_cmyk_to<kRGB1>(dst, src, count);
}

/*not static*/ inline void inverted_CMYK_to_BGR1(uint32_t dst[], const uint32_t* src, int count) {
    inverted_cmyk_to<kBGR1>(dst, src, count);
}

#else

/*not static*/ inline void RGBA_to_rgbA(uint32_t* dst, const uint32_t* src, int count) {
    RGBA_to_rgbA_portable(dst, src, count);
}

/*not static*/ inline void RGBA_to_bgrA(uint32_t* dst, const uint32_t* src, int count) {
    RGBA_to_bgrA_portable(dst, src, count);
}

/*not static*/ inline void RGBA_to_BGRA(uint32_t* dst, const uint32_t* src, int count) {
    RGBA_to_BGRA_portable(dst, src, count);
}

/*not static*/ inline void RGB_to_RGB1(uint32_t dst[], const uint8_t* src, int count) {
    RGB_to_RGB1_portable(dst, src, count);
}

/*not static*/ inline void RGB_to_BGR1(uint32_t dst[], const uint8_t* src, int count) {
    RGB_to_BGR1_portable(dst, src, count);
}

/*not static*/ inline void gray_to_RGB1(uint32_t dst[], const uint8_t* src, int count) {
    gray_to_RGB1_portable(dst, src, count);
}

/*not static*/ inline void grayA_to_RGBA(uint32_t dst[], const uint8_t* src, int count) {
    grayA_to_RGBA_portable(dst, src, count);
}

/*not static*/ inline void grayA_to_rgbA(uint32_t dst[], const uint8_t* src, int count) {
    grayA_to_rgbA_portable(dst, src, count);
}

/*not static*/ inline void inverted_CMYK_to_RGB1(uint32_t dst[], const uint32_t* src, int count) {
    inverted_CMYK_to_RGB1_portable(dst, src, count);
}

/*not static*/ inline void inverted_CMYK_to_BGR1(uint32_t dst[], const uint32_t* src, int count) {
    inverted_CMYK_to_BGR1_portable(dst, src, count);
}

#endif

}  // namespace SK_OPTS_NS

#endif // SkSwizzler_opts_DEFINED