/* filter_vsx_intrinsics.c - PowerPC optimised filter functions
 *
 * Copyright (c) 2018 Cosmin Truta
 * Copyright (c) 2017 Glenn Randers-Pehrson
 * Written by Vadim Barkov, 2017.
 *
 * This code is released under the libpng license.
 * For conditions of distribution and use, see the disclaimer
 * and license in png.h
 */

#include <stdio.h>
#include <stdint.h>
#include "../pngpriv.h"

#ifdef PNG_READ_SUPPORTED

/* This code requires -maltivec and -mvsx on the command line: */
#if PNG_POWERPC_VSX_IMPLEMENTATION == 1 /* intrinsics code from pngpriv.h */

#include <altivec.h>

#if PNG_POWERPC_VSX_OPT > 0

#ifndef __VSX__
#  error "This code requires VSX support (POWER7 and later). Please provide -mvsx compiler flag."
#endif

#define vec_ld_unaligned(vec,data) vec = vec_vsx_ld(0,data)
#define vec_st_unaligned(vec,data) vec_vsx_st(vec,0,data)
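
/* Note: vec_ld/vec_st address only 16-byte aligned blocks (the low address
 * bits are ignored), whereas the VSX vec_vsx_ld/vec_vsx_st used by the two
 * macros above tolerate unaligned addresses.  The filters below therefore
 * load the previous row (whose alignment we cannot control) through these
 * macros and keep the aligned vec_ld/vec_st for the current row once it has
 * been brought to a 16-byte boundary.
 */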

/* Functions in this file look at most 3 pixels (a,b,c) to predict the 4th (d).
 * They're positioned like this:
 *    prev:  c b
 *    row:   a d
 * The Sub filter predicts d=a, Avg d=(a+b)/2, and Paeth predicts d to be
 * whichever of a, b, or c is closest to p=a+b-c.
 * (This is taken from ../intel/filter_sse2_intrinsics.c)
 */

#define vsx_declare_common_vars(row_info,row,prev_row,offset) \
   png_byte i;\
   png_bytep rp = row + offset;\
   png_const_bytep pp = prev_row;\
   size_t unaligned_top = 16 - (((size_t)rp % 16));\
   size_t istop;\
   if(unaligned_top == 16)\
      unaligned_top = 0;\
   istop = row_info->rowbytes;\
   if((unaligned_top < istop))\
      istop -= unaligned_top;\
   else{\
      unaligned_top = istop;\
      istop = 0;\
   }
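
/* Every filter below follows the same three-stage pattern set up by
 * vsx_declare_common_vars(): the first 'unaligned_top' bytes are filtered
 * with plain scalar code until rp reaches a 16-byte boundary, then whole
 * 16-byte blocks are handled with AltiVec/VSX loads and stores, and finally
 * any remaining tail bytes (fewer than 16) are filtered scalar again.  The
 * Up filter is the simplest case: each output byte depends only on the byte
 * directly above it, so a full block is done with a single vec_add.
 */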

void png_read_filter_row_up_vsx(png_row_infop row_info, png_bytep row,
                                png_const_bytep prev_row)
{
   vector unsigned char rp_vec;
   vector unsigned char pp_vec;
   vsx_declare_common_vars(row_info,row,prev_row,0)

   /* Altivec operations require 16-byte aligned data
    * but input can be unaligned. So we calculate
    * unaligned part as usual.
    */
   for (i = 0; i < unaligned_top; i++)
   {
      *rp = (png_byte)(((int)(*rp) + (int)(*pp++)) & 0xff);
      rp++;
   }

   /* Using SIMD while we can */
   while( istop >= 16 )
   {
      rp_vec = vec_ld(0,rp);
      vec_ld_unaligned(pp_vec,pp);

      rp_vec = vec_add(rp_vec,pp_vec);

      vec_st(rp_vec,0,rp);

      pp += 16;
      rp += 16;
      istop -= 16;
   }

   if(istop > 0)
   {
      /* If byte count of row is not divisible by 16
       * we will process remaining part as usual
       */
      for (i = 0; i < istop; i++)
      {
         *rp = (png_byte)(((int)(*rp) + (int)(*pp++)) & 0xff);
         rp++;
      }
   }
}
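
/* The permute masks below drive vec_perm(vec, VSX_CHAR_ZERO, mask).  Mask
 * entries 0-15 select a byte from the first operand and entries of 16 select
 * from the second operand, which is always the zero vector here, so 16
 * simply zero-fills that lane.  VSX_LEFTSHIFTEDn_b places the bytes of pixel
 * n-1 under the lane of pixel n (used to fetch the left neighbour 'a' from
 * the partially filtered row), while VSX_NOT_SHIFTEDn_b keeps pixel n's own
 * bytes in place (used to fetch 'b' from the previous row).  The _4 masks
 * cover a block of 4 pixels of 4 bytes, the _3 masks a block of 5 pixels of
 * 3 bytes.
 */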

static const vector unsigned char VSX_LEFTSHIFTED1_4 = {16,16,16,16, 0, 1, 2, 3,16,16,16,16,16,16,16,16};
static const vector unsigned char VSX_LEFTSHIFTED2_4 = {16,16,16,16,16,16,16,16, 4, 5, 6, 7,16,16,16,16};
static const vector unsigned char VSX_LEFTSHIFTED3_4 = {16,16,16,16,16,16,16,16,16,16,16,16, 8, 9,10,11};

static const vector unsigned char VSX_LEFTSHIFTED1_3 = {16,16,16, 0, 1, 2,16,16,16,16,16,16,16,16,16,16};
static const vector unsigned char VSX_LEFTSHIFTED2_3 = {16,16,16,16,16,16, 3, 4, 5,16,16,16,16,16,16,16};
static const vector unsigned char VSX_LEFTSHIFTED3_3 = {16,16,16,16,16,16,16,16,16, 6, 7, 8,16,16,16,16};
static const vector unsigned char VSX_LEFTSHIFTED4_3 = {16,16,16,16,16,16,16,16,16,16,16,16, 9,10,11,16};

static const vector unsigned char VSX_NOT_SHIFTED1_4 = {16,16,16,16, 4, 5, 6, 7,16,16,16,16,16,16,16,16};
static const vector unsigned char VSX_NOT_SHIFTED2_4 = {16,16,16,16,16,16,16,16, 8, 9,10,11,16,16,16,16};
static const vector unsigned char VSX_NOT_SHIFTED3_4 = {16,16,16,16,16,16,16,16,16,16,16,16,12,13,14,15};

static const vector unsigned char VSX_NOT_SHIFTED1_3 = {16,16,16, 3, 4, 5,16,16,16,16,16,16,16,16,16,16};
static const vector unsigned char VSX_NOT_SHIFTED2_3 = {16,16,16,16,16,16, 6, 7, 8,16,16,16,16,16,16,16};
static const vector unsigned char VSX_NOT_SHIFTED3_3 = {16,16,16,16,16,16,16,16,16, 9,10,11,16,16,16,16};
static const vector unsigned char VSX_NOT_SHIFTED4_3 = {16,16,16,16,16,16,16,16,16,16,16,16,12,13,14,16};

static const vector unsigned char VSX_CHAR_ZERO = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
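
/* The masks below widen unsigned chars to unsigned shorts (and back) by
 * permuting the data bytes together with bytes of VSX_CHAR_ZERO: each 16-bit
 * lane gets one row byte in its low-order byte and zero in the other byte.
 * Which memory position holds the low-order byte differs between little- and
 * big-endian targets, hence the two variants.
 */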
#ifdef __LITTLE_ENDIAN__

static const vector unsigned char VSX_CHAR_TO_SHORT1_4 = { 4,16, 5,16, 6,16, 7,16,16,16,16,16,16,16,16,16};
static const vector unsigned char VSX_CHAR_TO_SHORT2_4 = { 8,16, 9,16,10,16,11,16,16,16,16,16,16,16,16,16};
static const vector unsigned char VSX_CHAR_TO_SHORT3_4 = {12,16,13,16,14,16,15,16,16,16,16,16,16,16,16,16};

static const vector unsigned char VSX_SHORT_TO_CHAR1_4 = {16,16,16,16, 0, 2, 4, 6,16,16,16,16,16,16,16,16};
static const vector unsigned char VSX_SHORT_TO_CHAR2_4 = {16,16,16,16,16,16,16,16, 0, 2, 4, 6,16,16,16,16};
static const vector unsigned char VSX_SHORT_TO_CHAR3_4 = {16,16,16,16,16,16,16,16,16,16,16,16, 0, 2, 4, 6};

static const vector unsigned char VSX_CHAR_TO_SHORT1_3 = { 3,16, 4,16, 5,16,16,16,16,16,16,16,16,16,16,16};
static const vector unsigned char VSX_CHAR_TO_SHORT2_3 = { 6,16, 7,16, 8,16,16,16,16,16,16,16,16,16,16,16};
static const vector unsigned char VSX_CHAR_TO_SHORT3_3 = { 9,16,10,16,11,16,16,16,16,16,16,16,16,16,16,16};
static const vector unsigned char VSX_CHAR_TO_SHORT4_3 = {12,16,13,16,14,16,16,16,16,16,16,16,16,16,16,16};

static const vector unsigned char VSX_SHORT_TO_CHAR1_3 = {16,16,16, 0, 2, 4,16,16,16,16,16,16,16,16,16,16};
static const vector unsigned char VSX_SHORT_TO_CHAR2_3 = {16,16,16,16,16,16, 0, 2, 4,16,16,16,16,16,16,16};
static const vector unsigned char VSX_SHORT_TO_CHAR3_3 = {16,16,16,16,16,16,16,16,16, 0, 2, 4,16,16,16,16};
static const vector unsigned char VSX_SHORT_TO_CHAR4_3 = {16,16,16,16,16,16,16,16,16,16,16,16, 0, 2, 4,16};

#elif defined(__BIG_ENDIAN__)

static const vector unsigned char VSX_CHAR_TO_SHORT1_4 = {16, 4,16, 5,16, 6,16, 7,16,16,16,16,16,16,16,16};
static const vector unsigned char VSX_CHAR_TO_SHORT2_4 = {16, 8,16, 9,16,10,16,11,16,16,16,16,16,16,16,16};
static const vector unsigned char VSX_CHAR_TO_SHORT3_4 = {16,12,16,13,16,14,16,15,16,16,16,16,16,16,16,16};

static const vector unsigned char VSX_SHORT_TO_CHAR1_4 = {16,16,16,16, 1, 3, 5, 7,16,16,16,16,16,16,16,16};
static const vector unsigned char VSX_SHORT_TO_CHAR2_4 = {16,16,16,16,16,16,16,16, 1, 3, 5, 7,16,16,16,16};
static const vector unsigned char VSX_SHORT_TO_CHAR3_4 = {16,16,16,16,16,16,16,16,16,16,16,16, 1, 3, 5, 7};

static const vector unsigned char VSX_CHAR_TO_SHORT1_3 = {16, 3,16, 4,16, 5,16,16,16,16,16,16,16,16,16,16};
static const vector unsigned char VSX_CHAR_TO_SHORT2_3 = {16, 6,16, 7,16, 8,16,16,16,16,16,16,16,16,16,16};
static const vector unsigned char VSX_CHAR_TO_SHORT3_3 = {16, 9,16,10,16,11,16,16,16,16,16,16,16,16,16,16};
static const vector unsigned char VSX_CHAR_TO_SHORT4_3 = {16,12,16,13,16,14,16,16,16,16,16,16,16,16,16,16};

static const vector unsigned char VSX_SHORT_TO_CHAR1_3 = {16,16,16, 1, 3, 5,16,16,16,16,16,16,16,16,16,16};
static const vector unsigned char VSX_SHORT_TO_CHAR2_3 = {16,16,16,16,16,16, 1, 3, 5,16,16,16,16,16,16,16};
static const vector unsigned char VSX_SHORT_TO_CHAR3_3 = {16,16,16,16,16,16,16,16,16, 1, 3, 5,16,16,16,16};
static const vector unsigned char VSX_SHORT_TO_CHAR4_3 = {16,16,16,16,16,16,16,16,16,16,16,16, 1, 3, 5,16};

#endif

#define vsx_char_to_short(vec,offset,bpp) (vector unsigned short)vec_perm((vec),VSX_CHAR_ZERO,VSX_CHAR_TO_SHORT##offset##_##bpp)
#define vsx_short_to_char(vec,offset,bpp) vec_perm(((vector unsigned char)(vec)),VSX_CHAR_ZERO,VSX_SHORT_TO_CHAR##offset##_##bpp)

#ifdef PNG_USE_ABS
#  define vsx_abs(number) abs(number)
#else
#  define vsx_abs(number) (number > 0) ? (number) : -(number)
#endif
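
/* Sub filter: each byte is predicted by the byte bpp positions to its left,
 * so the pixels in a block form a running sum that cannot be computed fully
 * in parallel.  The vector loops below therefore filter the first pixel of
 * every 16-byte block with scalar code, reload the block, and then use the
 * VSX_LEFTSHIFTEDn masks to add each completed pixel into the next one, one
 * vec_add per remaining pixel.
 */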

void png_read_filter_row_sub4_vsx(png_row_infop row_info, png_bytep row,
                                  png_const_bytep prev_row)
{
   png_byte bpp = 4;

   vector unsigned char rp_vec;
   vector unsigned char part_vec;

   vsx_declare_common_vars(row_info,row,prev_row,bpp)
   PNG_UNUSED(pp)

   /* Altivec operations require 16-byte aligned data
    * but input can be unaligned. So we calculate
    * unaligned part as usual.
    */
   for (i = 0; i < unaligned_top; i++)
   {
      *rp = (png_byte)(((int)(*rp) + (int)(*(rp-bpp))) & 0xff);
      rp++;
   }

   /* Using SIMD while we can */
   while( istop >= 16 )
   {
      for(i=0;i < bpp ; i++)
      {
         *rp = (png_byte)(((int)(*rp) + (int)(*(rp-bpp))) & 0xff);
         rp++;
      }
      rp -= bpp;

      rp_vec = vec_ld(0,rp);
      part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED1_4);
      rp_vec = vec_add(rp_vec,part_vec);

      part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED2_4);
      rp_vec = vec_add(rp_vec,part_vec);

      part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED3_4);
      rp_vec = vec_add(rp_vec,part_vec);

      vec_st(rp_vec,0,rp);

      rp += 16;
      istop -= 16;
   }

   if(istop > 0)
      for (i = 0; i < istop % 16; i++)
      {
         *rp = (png_byte)(((int)(*rp) + (int)(*(rp - bpp))) & 0xff);
         rp++;
      }
}

void png_read_filter_row_sub3_vsx(png_row_infop row_info, png_bytep row,
                                  png_const_bytep prev_row)
{
   png_byte bpp = 3;

   vector unsigned char rp_vec;
   vector unsigned char part_vec;

   vsx_declare_common_vars(row_info,row,prev_row,bpp)
   PNG_UNUSED(pp)

   /* Altivec operations require 16-byte aligned data
    * but input can be unaligned. So we calculate
    * unaligned part as usual.
    */
   for (i = 0; i < unaligned_top; i++)
   {
      *rp = (png_byte)(((int)(*rp) + (int)(*(rp-bpp))) & 0xff);
      rp++;
   }

   /* Using SIMD while we can */
   while( istop >= 16 )
   {
      for(i=0;i < bpp ; i++)
      {
         *rp = (png_byte)(((int)(*rp) + (int)(*(rp-bpp))) & 0xff);
         rp++;
      }
      rp -= bpp;

      rp_vec = vec_ld(0,rp);
      part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED1_3);
      rp_vec = vec_add(rp_vec,part_vec);

      part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED2_3);
      rp_vec = vec_add(rp_vec,part_vec);

      part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED3_3);
      rp_vec = vec_add(rp_vec,part_vec);

      part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED4_3);
      rp_vec = vec_add(rp_vec,part_vec);

      vec_st(rp_vec,0,rp);
      rp += 15;
      istop -= 16;

      /* Since 16 % bpp = 16 % 3 = 1, the last element of the block must
       * be processed manually
       */
      *rp = (png_byte)(((int)(*rp) + (int)(*(rp-bpp))) & 0xff);
      rp++;
   }

   if(istop > 0)
      for (i = 0; i < istop % 16; i++)
      {
         *rp = (png_byte)(((int)(*rp) + (int)(*(rp-bpp))) & 0xff);
         rp++;
      }
}
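
/* Avg filter: d = x + floor((a + b) / 2), where a is the left neighbour in
 * the current row and b the byte above.  vec_avg() rounds up, computing
 * (a + b + 1) >> 1, so the loops below subtract ((a ^ b) & 1) from its
 * result to obtain the floor average required by PNG.  As in the Sub
 * filter, the left neighbour of the first pixel in a block is produced by a
 * short scalar loop, and rp/pp are stepped back by bpp so that the vector
 * load covers that pixel together with the following unfiltered ones.
 */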

void png_read_filter_row_avg4_vsx(png_row_infop row_info, png_bytep row,
                                  png_const_bytep prev_row)
{
   png_byte bpp = 4;

   vector unsigned char rp_vec;
   vector unsigned char pp_vec;
   vector unsigned char pp_part_vec;
   vector unsigned char rp_part_vec;
   vector unsigned char avg_vec;

   vsx_declare_common_vars(row_info,row,prev_row,bpp)
   rp -= bpp;
   if(istop >= bpp)
      istop -= bpp;

   for (i = 0; i < bpp; i++)
   {
      *rp = (png_byte)(((int)(*rp) +
         ((int)(*pp++) / 2 )) & 0xff);

      rp++;
   }

   /* Altivec operations require 16-byte aligned data
    * but input can be unaligned. So we calculate
    * unaligned part as usual.
    */
   for (i = 0; i < unaligned_top; i++)
   {
      *rp = (png_byte)(((int)(*rp) +
         (int)(*pp++ + *(rp-bpp)) / 2 ) & 0xff);

      rp++;
   }

   /* Using SIMD while we can */
   while( istop >= 16 )
   {
      for(i=0;i < bpp ; i++)
      {
         *rp = (png_byte)(((int)(*rp) +
            (int)(*pp++ + *(rp-bpp)) / 2 ) & 0xff);

         rp++;
      }
      rp -= bpp;
      pp -= bpp;

      vec_ld_unaligned(pp_vec,pp);
      rp_vec = vec_ld(0,rp);

      rp_part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED1_4);
      pp_part_vec = vec_perm(pp_vec,VSX_CHAR_ZERO,VSX_NOT_SHIFTED1_4);
      avg_vec = vec_avg(rp_part_vec,pp_part_vec);
      avg_vec = vec_sub(avg_vec, vec_and(vec_xor(rp_part_vec,pp_part_vec),vec_splat_u8(1)));
      rp_vec = vec_add(rp_vec,avg_vec);

      rp_part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED2_4);
      pp_part_vec = vec_perm(pp_vec,VSX_CHAR_ZERO,VSX_NOT_SHIFTED2_4);
      avg_vec = vec_avg(rp_part_vec,pp_part_vec);
      avg_vec = vec_sub(avg_vec, vec_and(vec_xor(rp_part_vec,pp_part_vec),vec_splat_u8(1)));
      rp_vec = vec_add(rp_vec,avg_vec);

      rp_part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED3_4);
      pp_part_vec = vec_perm(pp_vec,VSX_CHAR_ZERO,VSX_NOT_SHIFTED3_4);
      avg_vec = vec_avg(rp_part_vec,pp_part_vec);
      avg_vec = vec_sub(avg_vec, vec_and(vec_xor(rp_part_vec,pp_part_vec),vec_splat_u8(1)));
      rp_vec = vec_add(rp_vec,avg_vec);

      vec_st(rp_vec,0,rp);

      rp += 16;
      pp += 16;
      istop -= 16;
   }

   if(istop > 0)
      for (i = 0; i < istop % 16; i++)
      {
         *rp = (png_byte)(((int)(*rp) +
            (int)(*pp++ + *(rp-bpp)) / 2 ) & 0xff);

         rp++;
      }
}

void png_read_filter_row_avg3_vsx(png_row_infop row_info, png_bytep row,
                                  png_const_bytep prev_row)
{
   png_byte bpp = 3;

   vector unsigned char rp_vec;
   vector unsigned char pp_vec;
   vector unsigned char pp_part_vec;
   vector unsigned char rp_part_vec;
   vector unsigned char avg_vec;

   vsx_declare_common_vars(row_info,row,prev_row,bpp)
   rp -= bpp;
   if(istop >= bpp)
      istop -= bpp;

   for (i = 0; i < bpp; i++)
   {
      *rp = (png_byte)(((int)(*rp) +
         ((int)(*pp++) / 2 )) & 0xff);

      rp++;
   }

   /* Altivec operations require 16-byte aligned data
    * but input can be unaligned. So we calculate
    * unaligned part as usual.
    */
   for (i = 0; i < unaligned_top; i++)
   {
      *rp = (png_byte)(((int)(*rp) +
         (int)(*pp++ + *(rp-bpp)) / 2 ) & 0xff);

      rp++;
   }

   /* Using SIMD while we can */
   while( istop >= 16 )
   {
      for(i=0;i < bpp ; i++)
      {
         *rp = (png_byte)(((int)(*rp) +
            (int)(*pp++ + *(rp-bpp)) / 2 ) & 0xff);

         rp++;
      }
      rp -= bpp;
      pp -= bpp;

      vec_ld_unaligned(pp_vec,pp);
      rp_vec = vec_ld(0,rp);

      rp_part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED1_3);
      pp_part_vec = vec_perm(pp_vec,VSX_CHAR_ZERO,VSX_NOT_SHIFTED1_3);
      avg_vec = vec_avg(rp_part_vec,pp_part_vec);
      avg_vec = vec_sub(avg_vec, vec_and(vec_xor(rp_part_vec,pp_part_vec),vec_splat_u8(1)));
      rp_vec = vec_add(rp_vec,avg_vec);

      rp_part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED2_3);
      pp_part_vec = vec_perm(pp_vec,VSX_CHAR_ZERO,VSX_NOT_SHIFTED2_3);
      avg_vec = vec_avg(rp_part_vec,pp_part_vec);
      avg_vec = vec_sub(avg_vec, vec_and(vec_xor(rp_part_vec,pp_part_vec),vec_splat_u8(1)));
      rp_vec = vec_add(rp_vec,avg_vec);

      rp_part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED3_3);
      pp_part_vec = vec_perm(pp_vec,VSX_CHAR_ZERO,VSX_NOT_SHIFTED3_3);
      avg_vec = vec_avg(rp_part_vec,pp_part_vec);
      avg_vec = vec_sub(avg_vec, vec_and(vec_xor(rp_part_vec,pp_part_vec),vec_splat_u8(1)));
      rp_vec = vec_add(rp_vec,avg_vec);

      rp_part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED4_3);
      pp_part_vec = vec_perm(pp_vec,VSX_CHAR_ZERO,VSX_NOT_SHIFTED4_3);
      avg_vec = vec_avg(rp_part_vec,pp_part_vec);
      avg_vec = vec_sub(avg_vec, vec_and(vec_xor(rp_part_vec,pp_part_vec),vec_splat_u8(1)));
      rp_vec = vec_add(rp_vec,avg_vec);

      vec_st(rp_vec,0,rp);

      rp += 15;
      pp += 15;
      istop -= 16;

      /* Since 16 % bpp = 16 % 3 = 1, the last element of the block must
       * be processed manually
       */
      *rp = (png_byte)(((int)(*rp) +
         (int)(*pp++ + *(rp-bpp)) / 2 ) & 0xff);

      rp++;
   }

   if(istop > 0)
      for (i = 0; i < istop % 16; i++)
      {
         *rp = (png_byte)(((int)(*rp) +
            (int)(*pp++ + *(rp-bpp)) / 2 ) & 0xff);

         rp++;
      }
}

/* Bytewise c ? t : e. */
#define if_then_else(c,t,e) vec_sel(e,t,c)

#define vsx_paeth_process(rp,pp,a,b,c,pa,pb,pc,bpp) {\
      c = *(pp - bpp);\
      a = *(rp - bpp);\
      b = *pp++;\
      p = b - c;\
      pc = a - c;\
      pa = vsx_abs(p);\
      pb = vsx_abs(pc);\
      pc = vsx_abs(p + pc);\
      if (pb < pa) pa = pb, a = b;\
      if (pc < pa) a = c;\
      a += *rp;\
      *rp++ = (png_byte)a;\
      }
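
/* Paeth filter, vectorised.  The differences b-c, a-c and their sum can be
 * negative and exceed the 0..255 byte range, so each group of pixels is
 * widened to 16-bit lanes (vsx_char_to_short) before pa, pb and pc are
 * computed.  if_then_else() relies on vec_cmpeq() producing all-ones or
 * all-zeros per lane, which makes the bitwise vec_sel() act as a per-lane
 * conditional; the nested selects reproduce the scalar tie-breaking order
 * (prefer a, then b, then c).  As in the Sub and Avg filters, the first
 * pixel of each block is filtered with the scalar macro above so that its
 * reconstructed bytes are available as the left neighbour of the next pixel.
 */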

void png_read_filter_row_paeth4_vsx(png_row_infop row_info, png_bytep row,
                                    png_const_bytep prev_row)
{
   png_byte bpp = 4;

   int a, b, c, pa, pb, pc, p;
   vector unsigned char rp_vec;
   vector unsigned char pp_vec;
   vector unsigned short a_vec,b_vec,c_vec,nearest_vec;
   vector signed short pa_vec,pb_vec,pc_vec,smallest_vec;

   vsx_declare_common_vars(row_info,row,prev_row,bpp)
   rp -= bpp;
   if(istop >= bpp)
      istop -= bpp;

   /* Process the first pixel in the row completely (this is the same as 'up'
    * because there is only one candidate predictor for the first row).
    */
   for(i = 0; i < bpp ; i++)
   {
      *rp = (png_byte)( *rp + *pp);
      rp++;
      pp++;
   }

   for(i = 0; i < unaligned_top ; i++)
   {
      vsx_paeth_process(rp,pp,a,b,c,pa,pb,pc,bpp)
   }

   while( istop >= 16)
   {
      for(i = 0; i < bpp ; i++)
      {
         vsx_paeth_process(rp,pp,a,b,c,pa,pb,pc,bpp)
      }

      rp -= bpp;
      pp -= bpp;
      rp_vec = vec_ld(0,rp);
      vec_ld_unaligned(pp_vec,pp);

      a_vec = vsx_char_to_short(vec_perm(rp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED1_4),1,4);
      b_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_NOT_SHIFTED1_4),1,4);
      c_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED1_4),1,4);

      pa_vec = (vector signed short) vec_sub(b_vec,c_vec);
      pb_vec = (vector signed short) vec_sub(a_vec , c_vec);
      pc_vec = vec_add(pa_vec,pb_vec);

      pa_vec = vec_abs(pa_vec);
      pb_vec = vec_abs(pb_vec);
      pc_vec = vec_abs(pc_vec);

      smallest_vec = vec_min(pc_vec, vec_min(pa_vec,pb_vec));

      nearest_vec = if_then_else(
            vec_cmpeq(pa_vec,smallest_vec),
            a_vec,
            if_then_else(
              vec_cmpeq(pb_vec,smallest_vec),
              b_vec,
              c_vec
              )
            );

      rp_vec = vec_add(rp_vec,(vsx_short_to_char(nearest_vec,1,4)));

      a_vec = vsx_char_to_short(vec_perm(rp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED2_4),2,4);
      b_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_NOT_SHIFTED2_4),2,4);
      c_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED2_4),2,4);

      pa_vec = (vector signed short) vec_sub(b_vec,c_vec);
      pb_vec = (vector signed short) vec_sub(a_vec , c_vec);
      pc_vec = vec_add(pa_vec,pb_vec);

      pa_vec = vec_abs(pa_vec);
      pb_vec = vec_abs(pb_vec);
      pc_vec = vec_abs(pc_vec);

      smallest_vec = vec_min(pc_vec, vec_min(pa_vec,pb_vec));

      nearest_vec = if_then_else(
            vec_cmpeq(pa_vec,smallest_vec),
            a_vec,
            if_then_else(
              vec_cmpeq(pb_vec,smallest_vec),
              b_vec,
              c_vec
              )
            );

      rp_vec = vec_add(rp_vec,(vsx_short_to_char(nearest_vec,2,4)));

      a_vec = vsx_char_to_short(vec_perm(rp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED3_4),3,4);
      b_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_NOT_SHIFTED3_4),3,4);
      c_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED3_4),3,4);

      pa_vec = (vector signed short) vec_sub(b_vec,c_vec);
      pb_vec = (vector signed short) vec_sub(a_vec , c_vec);
      pc_vec = vec_add(pa_vec,pb_vec);

      pa_vec = vec_abs(pa_vec);
      pb_vec = vec_abs(pb_vec);
      pc_vec = vec_abs(pc_vec);

      smallest_vec = vec_min(pc_vec, vec_min(pa_vec,pb_vec));

      nearest_vec = if_then_else(
            vec_cmpeq(pa_vec,smallest_vec),
            a_vec,
            if_then_else(
              vec_cmpeq(pb_vec,smallest_vec),
              b_vec,
              c_vec
              )
            );

      rp_vec = vec_add(rp_vec,(vsx_short_to_char(nearest_vec,3,4)));

      vec_st(rp_vec,0,rp);

      rp += 16;
      pp += 16;
      istop -= 16;
   }

   if(istop > 0)
      for (i = 0; i < istop % 16; i++)
      {
         vsx_paeth_process(rp,pp,a,b,c,pa,pb,pc,bpp)
      }
}

void png_read_filter_row_paeth3_vsx(png_row_infop row_info, png_bytep row,
                                    png_const_bytep prev_row)
{
   png_byte bpp = 3;

   int a, b, c, pa, pb, pc, p;
   vector unsigned char rp_vec;
   vector unsigned char pp_vec;
   vector unsigned short a_vec,b_vec,c_vec,nearest_vec;
   vector signed short pa_vec,pb_vec,pc_vec,smallest_vec;

   vsx_declare_common_vars(row_info,row,prev_row,bpp)
   rp -= bpp;
   if(istop >= bpp)
      istop -= bpp;

   /* Process the first pixel in the row completely (this is the same as 'up'
    * because there is only one candidate predictor for the first row).
    */
   for(i = 0; i < bpp ; i++)
   {
      *rp = (png_byte)( *rp + *pp);
      rp++;
      pp++;
   }

   for(i = 0; i < unaligned_top ; i++)
   {
      vsx_paeth_process(rp,pp,a,b,c,pa,pb,pc,bpp)
   }

   while( istop >= 16)
   {
      for(i = 0; i < bpp ; i++)
      {
         vsx_paeth_process(rp,pp,a,b,c,pa,pb,pc,bpp)
      }

      rp -= bpp;
      pp -= bpp;
      rp_vec = vec_ld(0,rp);
      vec_ld_unaligned(pp_vec,pp);

      a_vec = vsx_char_to_short(vec_perm(rp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED1_3),1,3);
      b_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_NOT_SHIFTED1_3),1,3);
      c_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED1_3),1,3);

      pa_vec = (vector signed short) vec_sub(b_vec,c_vec);
      pb_vec = (vector signed short) vec_sub(a_vec , c_vec);
      pc_vec = vec_add(pa_vec,pb_vec);

      pa_vec = vec_abs(pa_vec);
      pb_vec = vec_abs(pb_vec);
      pc_vec = vec_abs(pc_vec);

      smallest_vec = vec_min(pc_vec, vec_min(pa_vec,pb_vec));

      nearest_vec = if_then_else(
            vec_cmpeq(pa_vec,smallest_vec),
            a_vec,
            if_then_else(
              vec_cmpeq(pb_vec,smallest_vec),
              b_vec,
              c_vec
              )
            );

      rp_vec = vec_add(rp_vec,(vsx_short_to_char(nearest_vec,1,3)));

      a_vec = vsx_char_to_short(vec_perm(rp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED2_3),2,3);
      b_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_NOT_SHIFTED2_3),2,3);
      c_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED2_3),2,3);

      pa_vec = (vector signed short) vec_sub(b_vec,c_vec);
      pb_vec = (vector signed short) vec_sub(a_vec , c_vec);
      pc_vec = vec_add(pa_vec,pb_vec);

      pa_vec = vec_abs(pa_vec);
      pb_vec = vec_abs(pb_vec);
      pc_vec = vec_abs(pc_vec);

      smallest_vec = vec_min(pc_vec, vec_min(pa_vec,pb_vec));

      nearest_vec = if_then_else(
            vec_cmpeq(pa_vec,smallest_vec),
            a_vec,
            if_then_else(
              vec_cmpeq(pb_vec,smallest_vec),
              b_vec,
              c_vec
              )
            );

      rp_vec = vec_add(rp_vec,(vsx_short_to_char(nearest_vec,2,3)));

      a_vec = vsx_char_to_short(vec_perm(rp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED3_3),3,3);
      b_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_NOT_SHIFTED3_3),3,3);
      c_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED3_3),3,3);

      pa_vec = (vector signed short) vec_sub(b_vec,c_vec);
      pb_vec = (vector signed short) vec_sub(a_vec , c_vec);
      pc_vec = vec_add(pa_vec,pb_vec);

      pa_vec = vec_abs(pa_vec);
      pb_vec = vec_abs(pb_vec);
      pc_vec = vec_abs(pc_vec);

      smallest_vec = vec_min(pc_vec, vec_min(pa_vec,pb_vec));

      nearest_vec = if_then_else(
            vec_cmpeq(pa_vec,smallest_vec),
            a_vec,
            if_then_else(
              vec_cmpeq(pb_vec,smallest_vec),
              b_vec,
              c_vec
              )
            );

      rp_vec = vec_add(rp_vec,(vsx_short_to_char(nearest_vec,3,3)));

      a_vec = vsx_char_to_short(vec_perm(rp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED4_3),4,3);
      b_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_NOT_SHIFTED4_3),4,3);
      c_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED4_3),4,3);

      pa_vec = (vector signed short) vec_sub(b_vec,c_vec);
      pb_vec = (vector signed short) vec_sub(a_vec , c_vec);
      pc_vec = vec_add(pa_vec,pb_vec);

      pa_vec = vec_abs(pa_vec);
      pb_vec = vec_abs(pb_vec);
      pc_vec = vec_abs(pc_vec);

      smallest_vec = vec_min(pc_vec, vec_min(pa_vec,pb_vec));

      nearest_vec = if_then_else(
            vec_cmpeq(pa_vec,smallest_vec),
            a_vec,
            if_then_else(
              vec_cmpeq(pb_vec,smallest_vec),
              b_vec,
              c_vec
              )
            );

      rp_vec = vec_add(rp_vec,(vsx_short_to_char(nearest_vec,4,3)));

      vec_st(rp_vec,0,rp);

      rp += 15;
      pp += 15;
      istop -= 16;

      /* Since 16 % bpp = 16 % 3 = 1, the last element of the block must
       * be processed manually
       */
      vsx_paeth_process(rp,pp,a,b,c,pa,pb,pc,bpp)
   }

   if(istop > 0)
      for (i = 0; i < istop % 16; i++)
      {
         vsx_paeth_process(rp,pp,a,b,c,pa,pb,pc,bpp)
      }
}

#endif /* PNG_POWERPC_VSX_OPT > 0 */
#endif /* PNG_POWERPC_VSX_IMPLEMENTATION == 1 (intrinsics) */
#endif /* READ */