SkRasterPipeline_opts.h

  1. /*
  2. * Copyright 2018 Google Inc.
  3. *
  4. * Use of this source code is governed by a BSD-style license that can be
  5. * found in the LICENSE file.
  6. */
  7. #ifndef SkRasterPipeline_opts_DEFINED
  8. #define SkRasterPipeline_opts_DEFINED
  9. #include "include/core/SkTypes.h"
  10. #include "src/core/SkUtils.h" // unaligned_{load,store}
  11. #include "src/sksl/SkSLByteCode.h"
  12. // Every function in this file should be marked static and inline using SI.
  13. #if defined(__clang__)
  14. #define SI __attribute__((always_inline)) static inline
  15. #else
  16. #define SI static inline
  17. #endif
  18. template <typename Dst, typename Src>
  19. SI Dst bit_cast(const Src& src) {
  20. static_assert(sizeof(Dst) == sizeof(Src), "");
  21. return sk_unaligned_load<Dst>(&src);
  22. }
  23. template <typename Dst, typename Src>
  24. SI Dst widen_cast(const Src& src) {
  25. static_assert(sizeof(Dst) > sizeof(Src), "");
  26. Dst dst;
  27. memcpy(&dst, &src, sizeof(Src));
  28. return dst;
  29. }
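  // A rough usage sketch of these two casts (scalar example values chosen only for
  // illustration; the SSE paths below use widen_cast with __m128i the same way):
  //   float    f    = 1.0f;
  //   uint32_t bits = bit_cast<uint32_t>(f);       // same size: 0x3f800000
  //   __m128i  wide = widen_cast<__m128i>(bits);   // bigger type: low 4 bytes hold bits,
  //                                                // the remaining bytes stay uninitialized.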
  30. // Our program is an array of void*, either
  31. // - 1 void* per stage with no context pointer, the next stage;
  32. // - 2 void* per stage with a context pointer, first the context pointer, then the next stage.
  33. // load_and_inc() steps the program forward by 1 void*, returning that pointer.
  34. SI void* load_and_inc(void**& program) {
  35. #if defined(__GNUC__) && defined(__x86_64__)
  36. // If program is in %rsi (we try to make this likely) then this is a single instruction.
  37. void* rax;
  38. asm("lodsq" : "=a"(rax), "+S"(program)); // Write-only %rax, read-write %rsi.
  39. return rax;
  40. #else
  41. // On ARM *program++ compiles into pretty ideal code without any handholding.
  42. return *program++;
  43. #endif
  44. }
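  // For example, a program whose first stage takes no context and whose second stage
  // does might look roughly like this (hypothetical stage names, for illustration only):
  //   void* program[] = { (void*)stage_a,                 // no context
  //                       (void*)stage_b, (void*)&b_ctx,  // stage_b's context follows its entry
  //                       (void*)just_return };           // programs end with just_return
  // Each load_and_inc(program) peels off the next pointer in exactly this order.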
  45. // Lazily resolved on first cast. Does nothing if cast to Ctx::None.
  46. struct Ctx {
  47. struct None {};
  48. void* ptr;
  49. void**& program;
  50. explicit Ctx(void**& p) : ptr(nullptr), program(p) {}
  51. template <typename T>
  52. operator T*() {
  53. if (!ptr) { ptr = load_and_inc(program); }
  54. return (T*)ptr;
  55. }
  56. operator None() { return None{}; }
  57. };
  58. #if !defined(__clang__)
  59. #define JUMPER_IS_SCALAR
  60. #elif defined(SK_ARM_HAS_NEON)
  61. #define JUMPER_IS_NEON
  62. #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX512
  63. #define JUMPER_IS_AVX512
  64. #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
  65. #define JUMPER_IS_HSW
  66. #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX
  67. #define JUMPER_IS_AVX
  68. #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
  69. #define JUMPER_IS_SSE41
  70. #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
  71. #define JUMPER_IS_SSE2
  72. #else
  73. #define JUMPER_IS_SCALAR
  74. #endif
  75. // Older Clangs seem to crash when generating non-optimized NEON code for ARMv7.
  76. #if defined(__clang__) && !defined(__OPTIMIZE__) && defined(SK_CPU_ARM32)
  77. // Apple Clang 9 and vanilla Clang 5 are fine, and may even be conservative.
  78. #if defined(__apple_build_version__) && __clang_major__ < 9
  79. #define JUMPER_IS_SCALAR
  80. #elif __clang_major__ < 5
  81. #define JUMPER_IS_SCALAR
  82. #endif
  83. #if defined(JUMPER_IS_NEON) && defined(JUMPER_IS_SCALAR)
  84. #undef JUMPER_IS_NEON
  85. #endif
  86. #endif
  87. #if defined(JUMPER_IS_SCALAR)
  88. #include <math.h>
  89. #elif defined(JUMPER_IS_NEON)
  90. #include <arm_neon.h>
  91. #else
  92. #include <immintrin.h>
  93. #endif
  94. namespace SK_OPTS_NS {
  95. #if defined(JUMPER_IS_SCALAR)
  96. // This path should lead to portable scalar code.
  97. using F = float ;
  98. using I32 = int32_t;
  99. using U64 = uint64_t;
  100. using U32 = uint32_t;
  101. using U16 = uint16_t;
  102. using U8 = uint8_t ;
  103. SI F mad(F f, F m, F a) { return f*m+a; }
  104. SI F min(F a, F b) { return fminf(a,b); }
  105. SI F max(F a, F b) { return fmaxf(a,b); }
  106. SI F abs_ (F v) { return fabsf(v); }
  107. SI F floor_(F v) { return floorf(v); }
  108. SI F rcp (F v) { return 1.0f / v; }
  109. SI F rsqrt (F v) { return 1.0f / sqrtf(v); }
  110. SI F sqrt_(F v) { return sqrtf(v); }
  111. SI U32 round (F v, F scale) { return (uint32_t)(v*scale + 0.5f); }
  112. SI U16 pack(U32 v) { return (U16)v; }
  113. SI U8 pack(U16 v) { return (U8)v; }
  114. SI F if_then_else(I32 c, F t, F e) { return c ? t : e; }
  115. template <typename T>
  116. SI T gather(const T* p, U32 ix) { return p[ix]; }
  117. SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
  118. *r = ptr[0];
  119. *g = ptr[1];
  120. }
  121. SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
  122. ptr[0] = r;
  123. ptr[1] = g;
  124. }
  125. SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
  126. *r = ptr[0];
  127. *g = ptr[1];
  128. *b = ptr[2];
  129. }
  130. SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
  131. *r = ptr[0];
  132. *g = ptr[1];
  133. *b = ptr[2];
  134. *a = ptr[3];
  135. }
  136. SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
  137. ptr[0] = r;
  138. ptr[1] = g;
  139. ptr[2] = b;
  140. ptr[3] = a;
  141. }
  142. SI void load2(const float* ptr, size_t tail, F* r, F* g) {
  143. *r = ptr[0];
  144. *g = ptr[1];
  145. }
  146. SI void store2(float* ptr, size_t tail, F r, F g) {
  147. ptr[0] = r;
  148. ptr[1] = g;
  149. }
  150. SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
  151. *r = ptr[0];
  152. *g = ptr[1];
  153. *b = ptr[2];
  154. *a = ptr[3];
  155. }
  156. SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
  157. ptr[0] = r;
  158. ptr[1] = g;
  159. ptr[2] = b;
  160. ptr[3] = a;
  161. }
  162. #elif defined(JUMPER_IS_NEON)
  163. // Since we know we're using Clang, we can use its vector extensions.
  164. template <typename T> using V = T __attribute__((ext_vector_type(4)));
  165. using F = V<float >;
  166. using I32 = V< int32_t>;
  167. using U64 = V<uint64_t>;
  168. using U32 = V<uint32_t>;
  169. using U16 = V<uint16_t>;
  170. using U8 = V<uint8_t >;
  171. // We polyfill a few routines that Clang doesn't build into ext_vector_types.
  172. SI F min(F a, F b) { return vminq_f32(a,b); }
  173. SI F max(F a, F b) { return vmaxq_f32(a,b); }
  174. SI F abs_ (F v) { return vabsq_f32(v); }
  175. SI F rcp (F v) { auto e = vrecpeq_f32 (v); return vrecpsq_f32 (v,e ) * e; }
  176. SI F rsqrt (F v) { auto e = vrsqrteq_f32(v); return vrsqrtsq_f32(v,e*e) * e; }
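  // Both rcp() and rsqrt() above refine the hardware estimate with one Newton-Raphson step:
  // vrecpsq_f32(v,e) computes (2 - v*e), so e*(2 - v*e) is one refinement of 1/v, and
  // vrsqrtsq_f32(v,e*e) computes (3 - v*e*e)/2, so e*(3 - v*e*e)/2 refines 1/sqrt(v).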
  177. SI U16 pack(U32 v) { return __builtin_convertvector(v, U16); }
  178. SI U8 pack(U16 v) { return __builtin_convertvector(v, U8); }
  179. SI F if_then_else(I32 c, F t, F e) { return vbslq_f32((U32)c,t,e); }
  180. #if defined(SK_CPU_ARM64)
  181. SI F mad(F f, F m, F a) { return vfmaq_f32(a,f,m); }
  182. SI F floor_(F v) { return vrndmq_f32(v); }
  183. SI F sqrt_(F v) { return vsqrtq_f32(v); }
  184. SI U32 round(F v, F scale) { return vcvtnq_u32_f32(v*scale); }
  185. #else
  186. SI F mad(F f, F m, F a) { return vmlaq_f32(a,f,m); }
  187. SI F floor_(F v) {
  188. F roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v));
  189. return roundtrip - if_then_else(roundtrip > v, 1, 0);
  190. }
  191. SI F sqrt_(F v) {
  192. auto e = vrsqrteq_f32(v); // Estimate and two refinement steps for e = rsqrt(v).
  193. e *= vrsqrtsq_f32(v,e*e);
  194. e *= vrsqrtsq_f32(v,e*e);
  195. return v*e; // sqrt(v) == v*rsqrt(v).
  196. }
  197. SI U32 round(F v, F scale) {
  198. return vcvtq_u32_f32(mad(v,scale,0.5f));
  199. }
  200. #endif
  201. template <typename T>
  202. SI V<T> gather(const T* p, U32 ix) {
  203. return {p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]]};
  204. }
  205. SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
  206. uint16x4x2_t rg;
  207. if (__builtin_expect(tail,0)) {
  208. if ( true ) { rg = vld2_lane_u16(ptr + 0, rg, 0); }
  209. if (tail > 1) { rg = vld2_lane_u16(ptr + 2, rg, 1); }
  210. if (tail > 2) { rg = vld2_lane_u16(ptr + 4, rg, 2); }
  211. } else {
  212. rg = vld2_u16(ptr);
  213. }
  214. *r = rg.val[0];
  215. *g = rg.val[1];
  216. }
  217. SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
  218. if (__builtin_expect(tail,0)) {
  219. if ( true ) { vst2_lane_u16(ptr + 0, (uint16x4x2_t{{r,g}}), 0); }
  220. if (tail > 1) { vst2_lane_u16(ptr + 2, (uint16x4x2_t{{r,g}}), 1); }
  221. if (tail > 2) { vst2_lane_u16(ptr + 4, (uint16x4x2_t{{r,g}}), 2); }
  222. } else {
  223. vst2_u16(ptr, (uint16x4x2_t{{r,g}}));
  224. }
  225. }
  226. SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
  227. uint16x4x3_t rgb;
  228. if (__builtin_expect(tail,0)) {
  229. if ( true ) { rgb = vld3_lane_u16(ptr + 0, rgb, 0); }
  230. if (tail > 1) { rgb = vld3_lane_u16(ptr + 3, rgb, 1); }
  231. if (tail > 2) { rgb = vld3_lane_u16(ptr + 6, rgb, 2); }
  232. } else {
  233. rgb = vld3_u16(ptr);
  234. }
  235. *r = rgb.val[0];
  236. *g = rgb.val[1];
  237. *b = rgb.val[2];
  238. }
  239. SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
  240. uint16x4x4_t rgba;
  241. if (__builtin_expect(tail,0)) {
  242. if ( true ) { rgba = vld4_lane_u16(ptr + 0, rgba, 0); }
  243. if (tail > 1) { rgba = vld4_lane_u16(ptr + 4, rgba, 1); }
  244. if (tail > 2) { rgba = vld4_lane_u16(ptr + 8, rgba, 2); }
  245. } else {
  246. rgba = vld4_u16(ptr);
  247. }
  248. *r = rgba.val[0];
  249. *g = rgba.val[1];
  250. *b = rgba.val[2];
  251. *a = rgba.val[3];
  252. }
  253. SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
  254. if (__builtin_expect(tail,0)) {
  255. if ( true ) { vst4_lane_u16(ptr + 0, (uint16x4x4_t{{r,g,b,a}}), 0); }
  256. if (tail > 1) { vst4_lane_u16(ptr + 4, (uint16x4x4_t{{r,g,b,a}}), 1); }
  257. if (tail > 2) { vst4_lane_u16(ptr + 8, (uint16x4x4_t{{r,g,b,a}}), 2); }
  258. } else {
  259. vst4_u16(ptr, (uint16x4x4_t{{r,g,b,a}}));
  260. }
  261. }
  262. SI void load2(const float* ptr, size_t tail, F* r, F* g) {
  263. float32x4x2_t rg;
  264. if (__builtin_expect(tail,0)) {
  265. if ( true ) { rg = vld2q_lane_f32(ptr + 0, rg, 0); }
  266. if (tail > 1) { rg = vld2q_lane_f32(ptr + 2, rg, 1); }
  267. if (tail > 2) { rg = vld2q_lane_f32(ptr + 4, rg, 2); }
  268. } else {
  269. rg = vld2q_f32(ptr);
  270. }
  271. *r = rg.val[0];
  272. *g = rg.val[1];
  273. }
  274. SI void store2(float* ptr, size_t tail, F r, F g) {
  275. if (__builtin_expect(tail,0)) {
  276. if ( true ) { vst2q_lane_f32(ptr + 0, (float32x4x2_t{{r,g}}), 0); }
  277. if (tail > 1) { vst2q_lane_f32(ptr + 2, (float32x4x2_t{{r,g}}), 1); }
  278. if (tail > 2) { vst2q_lane_f32(ptr + 4, (float32x4x2_t{{r,g}}), 2); }
  279. } else {
  280. vst2q_f32(ptr, (float32x4x2_t{{r,g}}));
  281. }
  282. }
  283. SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
  284. float32x4x4_t rgba;
  285. if (__builtin_expect(tail,0)) {
  286. if ( true ) { rgba = vld4q_lane_f32(ptr + 0, rgba, 0); }
  287. if (tail > 1) { rgba = vld4q_lane_f32(ptr + 4, rgba, 1); }
  288. if (tail > 2) { rgba = vld4q_lane_f32(ptr + 8, rgba, 2); }
  289. } else {
  290. rgba = vld4q_f32(ptr);
  291. }
  292. *r = rgba.val[0];
  293. *g = rgba.val[1];
  294. *b = rgba.val[2];
  295. *a = rgba.val[3];
  296. }
  297. SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
  298. if (__builtin_expect(tail,0)) {
  299. if ( true ) { vst4q_lane_f32(ptr + 0, (float32x4x4_t{{r,g,b,a}}), 0); }
  300. if (tail > 1) { vst4q_lane_f32(ptr + 4, (float32x4x4_t{{r,g,b,a}}), 1); }
  301. if (tail > 2) { vst4q_lane_f32(ptr + 8, (float32x4x4_t{{r,g,b,a}}), 2); }
  302. } else {
  303. vst4q_f32(ptr, (float32x4x4_t{{r,g,b,a}}));
  304. }
  305. }
  306. #elif defined(JUMPER_IS_AVX) || defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
  307. // These are __m256 and __m256i, but friendlier and strongly-typed.
  308. template <typename T> using V = T __attribute__((ext_vector_type(8)));
  309. using F = V<float >;
  310. using I32 = V< int32_t>;
  311. using U64 = V<uint64_t>;
  312. using U32 = V<uint32_t>;
  313. using U16 = V<uint16_t>;
  314. using U8 = V<uint8_t >;
  315. SI F mad(F f, F m, F a) {
  316. #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
  317. return _mm256_fmadd_ps(f,m,a);
  318. #else
  319. return f*m+a;
  320. #endif
  321. }
  322. SI F min(F a, F b) { return _mm256_min_ps(a,b); }
  323. SI F max(F a, F b) { return _mm256_max_ps(a,b); }
  324. SI F abs_ (F v) { return _mm256_and_ps(v, 0-v); }
  325. SI F floor_(F v) { return _mm256_floor_ps(v); }
  326. SI F rcp (F v) { return _mm256_rcp_ps (v); }
  327. SI F rsqrt (F v) { return _mm256_rsqrt_ps(v); }
  328. SI F sqrt_(F v) { return _mm256_sqrt_ps (v); }
  329. SI U32 round (F v, F scale) { return _mm256_cvtps_epi32(v*scale); }
  330. SI U16 pack(U32 v) {
  331. return _mm_packus_epi32(_mm256_extractf128_si256(v, 0),
  332. _mm256_extractf128_si256(v, 1));
  333. }
  334. SI U8 pack(U16 v) {
  335. auto r = _mm_packus_epi16(v,v);
  336. return sk_unaligned_load<U8>(&r);
  337. }
  338. SI F if_then_else(I32 c, F t, F e) { return _mm256_blendv_ps(e,t,c); }
  339. template <typename T>
  340. SI V<T> gather(const T* p, U32 ix) {
  341. return { p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]],
  342. p[ix[4]], p[ix[5]], p[ix[6]], p[ix[7]], };
  343. }
  344. #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
  345. SI F gather(const float* p, U32 ix) { return _mm256_i32gather_ps (p, ix, 4); }
  346. SI U32 gather(const uint32_t* p, U32 ix) { return _mm256_i32gather_epi32(p, ix, 4); }
  347. SI U64 gather(const uint64_t* p, U32 ix) {
  348. __m256i parts[] = {
  349. _mm256_i32gather_epi64(p, _mm256_extracti128_si256(ix,0), 8),
  350. _mm256_i32gather_epi64(p, _mm256_extracti128_si256(ix,1), 8),
  351. };
  352. return bit_cast<U64>(parts);
  353. }
  354. #endif
  355. SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
  356. U16 _0123, _4567;
  357. if (__builtin_expect(tail,0)) {
  358. _0123 = _4567 = _mm_setzero_si128();
  359. auto* d = &_0123;
  360. if (tail > 3) {
  361. *d = _mm_loadu_si128(((__m128i*)ptr) + 0);
  362. tail -= 4;
  363. ptr += 8;
  364. d = &_4567;
  365. }
  366. bool high = false;
  367. if (tail > 1) {
  368. *d = _mm_loadu_si64(ptr);
  369. tail -= 2;
  370. ptr += 4;
  371. high = true;
  372. }
  373. if (tail > 0) {
  374. (*d)[high ? 4 : 0] = *(ptr + 0);
  375. (*d)[high ? 5 : 1] = *(ptr + 1);
  376. }
  377. } else {
  378. _0123 = _mm_loadu_si128(((__m128i*)ptr) + 0);
  379. _4567 = _mm_loadu_si128(((__m128i*)ptr) + 1);
  380. }
  381. *r = _mm_packs_epi32(_mm_srai_epi32(_mm_slli_epi32(_0123, 16), 16),
  382. _mm_srai_epi32(_mm_slli_epi32(_4567, 16), 16));
  383. *g = _mm_packs_epi32(_mm_srai_epi32(_0123, 16),
  384. _mm_srai_epi32(_4567, 16));
  385. }
  386. SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
  387. auto _0123 = _mm_unpacklo_epi16(r, g),
  388. _4567 = _mm_unpackhi_epi16(r, g);
  389. if (__builtin_expect(tail,0)) {
  390. const auto* s = &_0123;
  391. if (tail > 3) {
  392. _mm_storeu_si128((__m128i*)ptr, *s);
  393. s = &_4567;
  394. tail -= 4;
  395. ptr += 8;
  396. }
  397. bool high = false;
  398. if (tail > 1) {
  399. _mm_storel_epi64((__m128i*)ptr, *s);
  400. ptr += 4;
  401. tail -= 2;
  402. high = true;
  403. }
  404. if (tail > 0) {
  405. if (high) {
  406. *(int32_t*)ptr = _mm_extract_epi32(*s, 2);
  407. } else {
  408. *(int32_t*)ptr = _mm_cvtsi128_si32(*s);
  409. }
  410. }
  411. } else {
  412. _mm_storeu_si128((__m128i*)ptr + 0, _0123);
  413. _mm_storeu_si128((__m128i*)ptr + 1, _4567);
  414. }
  415. }
  416. SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
  417. __m128i _0,_1,_2,_3,_4,_5,_6,_7;
  418. if (__builtin_expect(tail,0)) {
  419. auto load_rgb = [](const uint16_t* src) {
  420. auto v = _mm_cvtsi32_si128(*(const uint32_t*)src);
  421. return _mm_insert_epi16(v, src[2], 2);
  422. };
  423. _1 = _2 = _3 = _4 = _5 = _6 = _7 = _mm_setzero_si128();
  424. if ( true ) { _0 = load_rgb(ptr + 0); }
  425. if (tail > 1) { _1 = load_rgb(ptr + 3); }
  426. if (tail > 2) { _2 = load_rgb(ptr + 6); }
  427. if (tail > 3) { _3 = load_rgb(ptr + 9); }
  428. if (tail > 4) { _4 = load_rgb(ptr + 12); }
  429. if (tail > 5) { _5 = load_rgb(ptr + 15); }
  430. if (tail > 6) { _6 = load_rgb(ptr + 18); }
  431. } else {
  432. // Load 0+1, 2+3, 4+5 normally, and 6+7 backed up 4 bytes so we don't run over.
  433. auto _01 = _mm_loadu_si128((const __m128i*)(ptr + 0)) ;
  434. auto _23 = _mm_loadu_si128((const __m128i*)(ptr + 6)) ;
  435. auto _45 = _mm_loadu_si128((const __m128i*)(ptr + 12)) ;
  436. auto _67 = _mm_srli_si128(_mm_loadu_si128((const __m128i*)(ptr + 16)), 4);
  437. _0 = _01; _1 = _mm_srli_si128(_01, 6);
  438. _2 = _23; _3 = _mm_srli_si128(_23, 6);
  439. _4 = _45; _5 = _mm_srli_si128(_45, 6);
  440. _6 = _67; _7 = _mm_srli_si128(_67, 6);
  441. }
  442. auto _02 = _mm_unpacklo_epi16(_0, _2), // r0 r2 g0 g2 b0 b2 xx xx
  443. _13 = _mm_unpacklo_epi16(_1, _3),
  444. _46 = _mm_unpacklo_epi16(_4, _6),
  445. _57 = _mm_unpacklo_epi16(_5, _7);
  446. auto rg0123 = _mm_unpacklo_epi16(_02, _13), // r0 r1 r2 r3 g0 g1 g2 g3
  447. bx0123 = _mm_unpackhi_epi16(_02, _13), // b0 b1 b2 b3 xx xx xx xx
  448. rg4567 = _mm_unpacklo_epi16(_46, _57),
  449. bx4567 = _mm_unpackhi_epi16(_46, _57);
  450. *r = _mm_unpacklo_epi64(rg0123, rg4567);
  451. *g = _mm_unpackhi_epi64(rg0123, rg4567);
  452. *b = _mm_unpacklo_epi64(bx0123, bx4567);
  453. }
  454. SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
  455. __m128i _01, _23, _45, _67;
  456. if (__builtin_expect(tail,0)) {
  457. auto src = (const double*)ptr;
  458. _01 = _23 = _45 = _67 = _mm_setzero_si128();
  459. if (tail > 0) { _01 = _mm_loadl_pd(_01, src+0); }
  460. if (tail > 1) { _01 = _mm_loadh_pd(_01, src+1); }
  461. if (tail > 2) { _23 = _mm_loadl_pd(_23, src+2); }
  462. if (tail > 3) { _23 = _mm_loadh_pd(_23, src+3); }
  463. if (tail > 4) { _45 = _mm_loadl_pd(_45, src+4); }
  464. if (tail > 5) { _45 = _mm_loadh_pd(_45, src+5); }
  465. if (tail > 6) { _67 = _mm_loadl_pd(_67, src+6); }
  466. } else {
  467. _01 = _mm_loadu_si128(((__m128i*)ptr) + 0);
  468. _23 = _mm_loadu_si128(((__m128i*)ptr) + 1);
  469. _45 = _mm_loadu_si128(((__m128i*)ptr) + 2);
  470. _67 = _mm_loadu_si128(((__m128i*)ptr) + 3);
  471. }
  472. auto _02 = _mm_unpacklo_epi16(_01, _23), // r0 r2 g0 g2 b0 b2 a0 a2
  473. _13 = _mm_unpackhi_epi16(_01, _23), // r1 r3 g1 g3 b1 b3 a1 a3
  474. _46 = _mm_unpacklo_epi16(_45, _67),
  475. _57 = _mm_unpackhi_epi16(_45, _67);
  476. auto rg0123 = _mm_unpacklo_epi16(_02, _13), // r0 r1 r2 r3 g0 g1 g2 g3
  477. ba0123 = _mm_unpackhi_epi16(_02, _13), // b0 b1 b2 b3 a0 a1 a2 a3
  478. rg4567 = _mm_unpacklo_epi16(_46, _57),
  479. ba4567 = _mm_unpackhi_epi16(_46, _57);
  480. *r = _mm_unpacklo_epi64(rg0123, rg4567);
  481. *g = _mm_unpackhi_epi64(rg0123, rg4567);
  482. *b = _mm_unpacklo_epi64(ba0123, ba4567);
  483. *a = _mm_unpackhi_epi64(ba0123, ba4567);
  484. }
  485. SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
  486. auto rg0123 = _mm_unpacklo_epi16(r, g), // r0 g0 r1 g1 r2 g2 r3 g3
  487. rg4567 = _mm_unpackhi_epi16(r, g), // r4 g4 r5 g5 r6 g6 r7 g7
  488. ba0123 = _mm_unpacklo_epi16(b, a),
  489. ba4567 = _mm_unpackhi_epi16(b, a);
  490. auto _01 = _mm_unpacklo_epi32(rg0123, ba0123),
  491. _23 = _mm_unpackhi_epi32(rg0123, ba0123),
  492. _45 = _mm_unpacklo_epi32(rg4567, ba4567),
  493. _67 = _mm_unpackhi_epi32(rg4567, ba4567);
  494. if (__builtin_expect(tail,0)) {
  495. auto dst = (double*)ptr;
  496. if (tail > 0) { _mm_storel_pd(dst+0, _01); }
  497. if (tail > 1) { _mm_storeh_pd(dst+1, _01); }
  498. if (tail > 2) { _mm_storel_pd(dst+2, _23); }
  499. if (tail > 3) { _mm_storeh_pd(dst+3, _23); }
  500. if (tail > 4) { _mm_storel_pd(dst+4, _45); }
  501. if (tail > 5) { _mm_storeh_pd(dst+5, _45); }
  502. if (tail > 6) { _mm_storel_pd(dst+6, _67); }
  503. } else {
  504. _mm_storeu_si128((__m128i*)ptr + 0, _01);
  505. _mm_storeu_si128((__m128i*)ptr + 1, _23);
  506. _mm_storeu_si128((__m128i*)ptr + 2, _45);
  507. _mm_storeu_si128((__m128i*)ptr + 3, _67);
  508. }
  509. }
  510. SI void load2(const float* ptr, size_t tail, F* r, F* g) {
  511. F _0123, _4567;
  512. if (__builtin_expect(tail, 0)) {
  513. _0123 = _4567 = _mm256_setzero_ps();
  514. F* d = &_0123;
  515. if (tail > 3) {
  516. *d = _mm256_loadu_ps(ptr);
  517. ptr += 8;
  518. tail -= 4;
  519. d = &_4567;
  520. }
  521. bool high = false;
  522. if (tail > 1) {
  523. *d = _mm256_castps128_ps256(_mm_loadu_ps(ptr));
  524. ptr += 4;
  525. tail -= 2;
  526. high = true;
  527. }
  528. if (tail > 0) {
  529. *d = high ? _mm256_insertf128_ps(*d, _mm_loadu_si64(ptr), 1)
  530. : _mm256_insertf128_ps(*d, _mm_loadu_si64(ptr), 0);
  531. }
  532. } else {
  533. _0123 = _mm256_loadu_ps(ptr + 0);
  534. _4567 = _mm256_loadu_ps(ptr + 8);
  535. }
  536. F _0145 = _mm256_permute2f128_pd(_0123, _4567, 0x20),
  537. _2367 = _mm256_permute2f128_pd(_0123, _4567, 0x31);
  538. *r = _mm256_shuffle_ps(_0145, _2367, 0x88);
  539. *g = _mm256_shuffle_ps(_0145, _2367, 0xDD);
  540. }
  541. SI void store2(float* ptr, size_t tail, F r, F g) {
  542. F _0145 = _mm256_unpacklo_ps(r, g),
  543. _2367 = _mm256_unpackhi_ps(r, g);
  544. F _0123 = _mm256_permute2f128_pd(_0145, _2367, 0x20),
  545. _4567 = _mm256_permute2f128_pd(_0145, _2367, 0x31);
  546. if (__builtin_expect(tail, 0)) {
  547. const __m256* s = &_0123;
  548. if (tail > 3) {
  549. _mm256_storeu_ps(ptr, *s);
  550. s = &_4567;
  551. tail -= 4;
  552. ptr += 8;
  553. }
  554. bool high = false;
  555. if (tail > 1) {
  556. _mm_storeu_ps(ptr, _mm256_extractf128_ps(*s, 0));
  557. ptr += 4;
  558. tail -= 2;
  559. high = true;
  560. }
  561. if (tail > 0) {
  562. *(ptr + 0) = (*s)[ high ? 4 : 0];
  563. *(ptr + 1) = (*s)[ high ? 5 : 1];
  564. }
  565. } else {
  566. _mm256_storeu_ps(ptr + 0, _0123);
  567. _mm256_storeu_ps(ptr + 8, _4567);
  568. }
  569. }
  570. SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
  571. F _04, _15, _26, _37;
  572. _04 = _15 = _26 = _37 = 0;
  573. switch (tail) {
  574. case 0: _37 = _mm256_insertf128_ps(_37, _mm_loadu_ps(ptr+28), 1);
  575. case 7: _26 = _mm256_insertf128_ps(_26, _mm_loadu_ps(ptr+24), 1);
  576. case 6: _15 = _mm256_insertf128_ps(_15, _mm_loadu_ps(ptr+20), 1);
  577. case 5: _04 = _mm256_insertf128_ps(_04, _mm_loadu_ps(ptr+16), 1);
  578. case 4: _37 = _mm256_insertf128_ps(_37, _mm_loadu_ps(ptr+12), 0);
  579. case 3: _26 = _mm256_insertf128_ps(_26, _mm_loadu_ps(ptr+ 8), 0);
  580. case 2: _15 = _mm256_insertf128_ps(_15, _mm_loadu_ps(ptr+ 4), 0);
  581. case 1: _04 = _mm256_insertf128_ps(_04, _mm_loadu_ps(ptr+ 0), 0);
  582. }
  583. F rg0145 = _mm256_unpacklo_ps(_04,_15), // r0 r1 g0 g1 | r4 r5 g4 g5
  584. ba0145 = _mm256_unpackhi_ps(_04,_15),
  585. rg2367 = _mm256_unpacklo_ps(_26,_37),
  586. ba2367 = _mm256_unpackhi_ps(_26,_37);
  587. *r = _mm256_unpacklo_pd(rg0145, rg2367);
  588. *g = _mm256_unpackhi_pd(rg0145, rg2367);
  589. *b = _mm256_unpacklo_pd(ba0145, ba2367);
  590. *a = _mm256_unpackhi_pd(ba0145, ba2367);
  591. }
  592. SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
  593. F rg0145 = _mm256_unpacklo_ps(r, g), // r0 g0 r1 g1 | r4 g4 r5 g5
  594. rg2367 = _mm256_unpackhi_ps(r, g), // r2 ... | r6 ...
  595. ba0145 = _mm256_unpacklo_ps(b, a), // b0 a0 b1 a1 | b4 a4 b5 a5
  596. ba2367 = _mm256_unpackhi_ps(b, a); // b2 ... | b6 ...
  597. F _04 = _mm256_unpacklo_pd(rg0145, ba0145), // r0 g0 b0 a0 | r4 g4 b4 a4
  598. _15 = _mm256_unpackhi_pd(rg0145, ba0145), // r1 ... | r5 ...
  599. _26 = _mm256_unpacklo_pd(rg2367, ba2367), // r2 ... | r6 ...
  600. _37 = _mm256_unpackhi_pd(rg2367, ba2367); // r3 ... | r7 ...
  601. if (__builtin_expect(tail, 0)) {
  602. if (tail > 0) { _mm_storeu_ps(ptr+ 0, _mm256_extractf128_ps(_04, 0)); }
  603. if (tail > 1) { _mm_storeu_ps(ptr+ 4, _mm256_extractf128_ps(_15, 0)); }
  604. if (tail > 2) { _mm_storeu_ps(ptr+ 8, _mm256_extractf128_ps(_26, 0)); }
  605. if (tail > 3) { _mm_storeu_ps(ptr+12, _mm256_extractf128_ps(_37, 0)); }
  606. if (tail > 4) { _mm_storeu_ps(ptr+16, _mm256_extractf128_ps(_04, 1)); }
  607. if (tail > 5) { _mm_storeu_ps(ptr+20, _mm256_extractf128_ps(_15, 1)); }
  608. if (tail > 6) { _mm_storeu_ps(ptr+24, _mm256_extractf128_ps(_26, 1)); }
  609. } else {
  610. F _01 = _mm256_permute2f128_ps(_04, _15, 32), // 32 == 0010 0000 == lo, lo
  611. _23 = _mm256_permute2f128_ps(_26, _37, 32),
  612. _45 = _mm256_permute2f128_ps(_04, _15, 49), // 49 == 0011 0001 == hi, hi
  613. _67 = _mm256_permute2f128_ps(_26, _37, 49);
  614. _mm256_storeu_ps(ptr+ 0, _01);
  615. _mm256_storeu_ps(ptr+ 8, _23);
  616. _mm256_storeu_ps(ptr+16, _45);
  617. _mm256_storeu_ps(ptr+24, _67);
  618. }
  619. }
  620. #elif defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41)
  621. template <typename T> using V = T __attribute__((ext_vector_type(4)));
  622. using F = V<float >;
  623. using I32 = V< int32_t>;
  624. using U64 = V<uint64_t>;
  625. using U32 = V<uint32_t>;
  626. using U16 = V<uint16_t>;
  627. using U8 = V<uint8_t >;
  628. SI F mad(F f, F m, F a) { return f*m+a; }
  629. SI F min(F a, F b) { return _mm_min_ps(a,b); }
  630. SI F max(F a, F b) { return _mm_max_ps(a,b); }
  631. SI F abs_(F v) { return _mm_and_ps(v, 0-v); }
  632. SI F rcp (F v) { return _mm_rcp_ps (v); }
  633. SI F rsqrt (F v) { return _mm_rsqrt_ps(v); }
  634. SI F sqrt_(F v) { return _mm_sqrt_ps (v); }
  635. SI U32 round(F v, F scale) { return _mm_cvtps_epi32(v*scale); }
  636. SI U16 pack(U32 v) {
  637. #if defined(JUMPER_IS_SSE41)
  638. auto p = _mm_packus_epi32(v,v);
  639. #else
  640. // Sign extend so that _mm_packs_epi32() does the pack we want.
  641. auto p = _mm_srai_epi32(_mm_slli_epi32(v, 16), 16);
  642. p = _mm_packs_epi32(p,p);
  643. #endif
  644. return sk_unaligned_load<U16>(&p); // We have two copies. Return (the lower) one.
  645. }
  646. SI U8 pack(U16 v) {
  647. auto r = widen_cast<__m128i>(v);
  648. r = _mm_packus_epi16(r,r);
  649. return sk_unaligned_load<U8>(&r);
  650. }
  651. SI F if_then_else(I32 c, F t, F e) {
  652. return _mm_or_ps(_mm_and_ps(c, t), _mm_andnot_ps(c, e));
  653. }
  654. SI F floor_(F v) {
  655. #if defined(JUMPER_IS_SSE41)
  656. return _mm_floor_ps(v);
  657. #else
  658. F roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(v));
  659. return roundtrip - if_then_else(roundtrip > v, 1, 0);
  660. #endif
  661. }
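  // Worked example of the SSE2 round-trip floor above: for v = -1.5 the convert-to-int
  // truncates toward zero, so roundtrip = -1.0; since -1.0 > -1.5 we subtract 1 and get
  // -2.0 == floor(-1.5). For v = 1.5, roundtrip = 1.0 is not greater than v, so it stays 1.0.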
  662. template <typename T>
  663. SI V<T> gather(const T* p, U32 ix) {
  664. return {p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]]};
  665. }
  666. SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
  667. __m128i _01;
  668. if (__builtin_expect(tail,0)) {
  669. _01 = _mm_setzero_si128();
  670. if (tail > 1) {
  671. _01 = _mm_loadl_pd(_01, (const double*)ptr); // r0 g0 r1 g1 00 00 00 00
  672. if (tail > 2) {
  673. _01 = _mm_loadh_pi(_01, (__m64 const* )(ptr + 4)); // r0 g0 r1 g1 r2 g2 00 00
  674. }
  675. } else {
  676. _01 = _mm_loadl_pi(_01, (__m64 const*)ptr + 0); // r0 g0 00 00 00 00 00 00
  677. }
  678. } else {
  679. _01 = _mm_loadu_si128(((__m128i*)ptr) + 0); // r0 g0 r1 g1 r2 g2 r3 g3
  680. }
  681. auto rg01_23 = _mm_shufflelo_epi16(_01, 0xD8); // r0 r1 g0 g1 r2 g2 r3 g3
  682. auto rg = _mm_shufflehi_epi16(rg01_23, 0xD8); // r0 r1 g0 g1 r2 r3 g2 g3
  683. auto R = _mm_shuffle_epi32(rg, 0x88); // r0 r1 r2 r3 r0 r1 r2 r3
  684. auto G = _mm_shuffle_epi32(rg, 0xDD); // g0 g1 g2 g3 g0 g1 g2 g3
  685. *r = sk_unaligned_load<U16>(&R);
  686. *g = sk_unaligned_load<U16>(&G);
  687. }
  688. SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
  689. U32 rg = _mm_unpacklo_epi16(widen_cast<__m128i>(r), widen_cast<__m128i>(g));
  690. if (__builtin_expect(tail, 0)) {
  691. if (tail > 1) {
  692. _mm_storel_epi64((__m128i*)ptr, rg);
  693. if (tail > 2) {
  694. int32_t rgpair = rg[2];
  695. memcpy(ptr + 4, &rgpair, sizeof(rgpair));
  696. }
  697. } else {
  698. int32_t rgpair = rg[0];
  699. memcpy(ptr, &rgpair, sizeof(rgpair));
  700. }
  701. } else {
  702. _mm_storeu_si128((__m128i*)ptr + 0, rg);
  703. }
  704. }
  705. SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
  706. __m128i _0, _1, _2, _3;
  707. if (__builtin_expect(tail,0)) {
  708. _1 = _2 = _3 = _mm_setzero_si128();
  709. auto load_rgb = [](const uint16_t* src) {
  710. auto v = _mm_cvtsi32_si128(*(const uint32_t*)src);
  711. return _mm_insert_epi16(v, src[2], 2);
  712. };
  713. if ( true ) { _0 = load_rgb(ptr + 0); }
  714. if (tail > 1) { _1 = load_rgb(ptr + 3); }
  715. if (tail > 2) { _2 = load_rgb(ptr + 6); }
  716. } else {
  717. // Load slightly weirdly to make sure we don't load past the end of 4x48 bits.
  718. auto _01 = _mm_loadu_si128((const __m128i*)(ptr + 0)) ,
  719. _23 = _mm_srli_si128(_mm_loadu_si128((const __m128i*)(ptr + 4)), 4);
  720. // Each _N holds R,G,B for pixel N in its lower 3 lanes (upper 5 are ignored).
  721. _0 = _01;
  722. _1 = _mm_srli_si128(_01, 6);
  723. _2 = _23;
  724. _3 = _mm_srli_si128(_23, 6);
  725. }
  726. // De-interlace to R,G,B.
  727. auto _02 = _mm_unpacklo_epi16(_0, _2), // r0 r2 g0 g2 b0 b2 xx xx
  728. _13 = _mm_unpacklo_epi16(_1, _3); // r1 r3 g1 g3 b1 b3 xx xx
  729. auto R = _mm_unpacklo_epi16(_02, _13), // r0 r1 r2 r3 g0 g1 g2 g3
  730. G = _mm_srli_si128(R, 8),
  731. B = _mm_unpackhi_epi16(_02, _13); // b0 b1 b2 b3 xx xx xx xx
  732. *r = sk_unaligned_load<U16>(&R);
  733. *g = sk_unaligned_load<U16>(&G);
  734. *b = sk_unaligned_load<U16>(&B);
  735. }
  736. SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
  737. __m128i _01, _23;
  738. if (__builtin_expect(tail,0)) {
  739. _01 = _23 = _mm_setzero_si128();
  740. auto src = (const double*)ptr;
  741. if ( true ) { _01 = _mm_loadl_pd(_01, src + 0); } // r0 g0 b0 a0 00 00 00 00
  742. if (tail > 1) { _01 = _mm_loadh_pd(_01, src + 1); } // r0 g0 b0 a0 r1 g1 b1 a1
  743. if (tail > 2) { _23 = _mm_loadl_pd(_23, src + 2); } // r2 g2 b2 a2 00 00 00 00
  744. } else {
  745. _01 = _mm_loadu_si128(((__m128i*)ptr) + 0); // r0 g0 b0 a0 r1 g1 b1 a1
  746. _23 = _mm_loadu_si128(((__m128i*)ptr) + 1); // r2 g2 b2 a2 r3 g3 b3 a3
  747. }
  748. auto _02 = _mm_unpacklo_epi16(_01, _23), // r0 r2 g0 g2 b0 b2 a0 a2
  749. _13 = _mm_unpackhi_epi16(_01, _23); // r1 r3 g1 g3 b1 b3 a1 a3
  750. auto rg = _mm_unpacklo_epi16(_02, _13), // r0 r1 r2 r3 g0 g1 g2 g3
  751. ba = _mm_unpackhi_epi16(_02, _13); // b0 b1 b2 b3 a0 a1 a2 a3
  752. *r = sk_unaligned_load<U16>((uint16_t*)&rg + 0);
  753. *g = sk_unaligned_load<U16>((uint16_t*)&rg + 4);
  754. *b = sk_unaligned_load<U16>((uint16_t*)&ba + 0);
  755. *a = sk_unaligned_load<U16>((uint16_t*)&ba + 4);
  756. }
  757. SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
  758. auto rg = _mm_unpacklo_epi16(widen_cast<__m128i>(r), widen_cast<__m128i>(g)),
  759. ba = _mm_unpacklo_epi16(widen_cast<__m128i>(b), widen_cast<__m128i>(a));
  760. if (__builtin_expect(tail, 0)) {
  761. auto dst = (double*)ptr;
  762. if ( true ) { _mm_storel_pd(dst + 0, _mm_unpacklo_epi32(rg, ba)); }
  763. if (tail > 1) { _mm_storeh_pd(dst + 1, _mm_unpacklo_epi32(rg, ba)); }
  764. if (tail > 2) { _mm_storel_pd(dst + 2, _mm_unpackhi_epi32(rg, ba)); }
  765. } else {
  766. _mm_storeu_si128((__m128i*)ptr + 0, _mm_unpacklo_epi32(rg, ba));
  767. _mm_storeu_si128((__m128i*)ptr + 1, _mm_unpackhi_epi32(rg, ba));
  768. }
  769. }
  770. SI void load2(const float* ptr, size_t tail, F* r, F* g) {
  771. F _01, _23;
  772. if (__builtin_expect(tail, 0)) {
  773. _01 = _23 = _mm_setzero_si128();
  774. if ( true ) { _01 = _mm_loadl_pi(_01, (__m64 const*)(ptr + 0)); }
  775. if (tail > 1) { _01 = _mm_loadh_pi(_01, (__m64 const*)(ptr + 2)); }
  776. if (tail > 2) { _23 = _mm_loadl_pi(_23, (__m64 const*)(ptr + 4)); }
  777. } else {
  778. _01 = _mm_loadu_ps(ptr + 0);
  779. _23 = _mm_loadu_ps(ptr + 4);
  780. }
  781. *r = _mm_shuffle_ps(_01, _23, 0x88);
  782. *g = _mm_shuffle_ps(_01, _23, 0xDD);
  783. }
  784. SI void store2(float* ptr, size_t tail, F r, F g) {
  785. F _01 = _mm_unpacklo_ps(r, g),
  786. _23 = _mm_unpackhi_ps(r, g);
  787. if (__builtin_expect(tail, 0)) {
  788. if ( true ) { _mm_storel_pi((__m64*)(ptr + 0), _01); }
  789. if (tail > 1) { _mm_storeh_pi((__m64*)(ptr + 2), _01); }
  790. if (tail > 2) { _mm_storel_pi((__m64*)(ptr + 4), _23); }
  791. } else {
  792. _mm_storeu_ps(ptr + 0, _01);
  793. _mm_storeu_ps(ptr + 4, _23);
  794. }
  795. }
  796. SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
  797. F _0, _1, _2, _3;
  798. if (__builtin_expect(tail, 0)) {
  799. _1 = _2 = _3 = _mm_setzero_si128();
  800. if ( true ) { _0 = _mm_loadu_ps(ptr + 0); }
  801. if (tail > 1) { _1 = _mm_loadu_ps(ptr + 4); }
  802. if (tail > 2) { _2 = _mm_loadu_ps(ptr + 8); }
  803. } else {
  804. _0 = _mm_loadu_ps(ptr + 0);
  805. _1 = _mm_loadu_ps(ptr + 4);
  806. _2 = _mm_loadu_ps(ptr + 8);
  807. _3 = _mm_loadu_ps(ptr +12);
  808. }
  809. _MM_TRANSPOSE4_PS(_0,_1,_2,_3);
  810. *r = _0;
  811. *g = _1;
  812. *b = _2;
  813. *a = _3;
  814. }
  815. SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
  816. _MM_TRANSPOSE4_PS(r,g,b,a);
  817. if (__builtin_expect(tail, 0)) {
  818. if ( true ) { _mm_storeu_ps(ptr + 0, r); }
  819. if (tail > 1) { _mm_storeu_ps(ptr + 4, g); }
  820. if (tail > 2) { _mm_storeu_ps(ptr + 8, b); }
  821. } else {
  822. _mm_storeu_ps(ptr + 0, r);
  823. _mm_storeu_ps(ptr + 4, g);
  824. _mm_storeu_ps(ptr + 8, b);
  825. _mm_storeu_ps(ptr +12, a);
  826. }
  827. }
  828. #endif
  829. // We need to be careful with casts.
  830. // (F)x means cast x to float in the portable path, but bit_cast x to float in the others.
  831. // These named casts and bit_cast() are always what they seem to be.
  832. #if defined(JUMPER_IS_SCALAR)
  833. SI F cast (U32 v) { return (F)v; }
  834. SI F cast64(U64 v) { return (F)v; }
  835. SI U32 trunc_(F v) { return (U32)v; }
  836. SI U32 expand(U16 v) { return (U32)v; }
  837. SI U32 expand(U8 v) { return (U32)v; }
  838. #else
  839. SI F cast (U32 v) { return __builtin_convertvector((I32)v, F); }
  840. SI F cast64(U64 v) { return __builtin_convertvector( v, F); }
  841. SI U32 trunc_(F v) { return (U32)__builtin_convertvector( v, I32); }
  842. SI U32 expand(U16 v) { return __builtin_convertvector( v, U32); }
  843. SI U32 expand(U8 v) { return __builtin_convertvector( v, U32); }
  844. #endif
  845. template <typename V>
  846. SI V if_then_else(I32 c, V t, V e) {
  847. return bit_cast<V>(if_then_else(c, bit_cast<F>(t), bit_cast<F>(e)));
  848. }
  849. SI U16 bswap(U16 x) {
  850. #if defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41)
  851. // Somewhat inexplicably Clang decides to do (x<<8) | (x>>8) in 32-bit lanes
  852. // when generating code for SSE2 and SSE4.1. We'll do it manually...
  853. auto v = widen_cast<__m128i>(x);
  854. v = _mm_slli_epi16(v,8) | _mm_srli_epi16(v,8);
  855. return sk_unaligned_load<U16>(&v);
  856. #else
  857. return (x<<8) | (x>>8);
  858. #endif
  859. }
  860. SI F fract(F v) { return v - floor_(v); }
  861. // See http://www.machinedlearnings.com/2011/06/fast-approximate-logarithm-exponential.html.
  862. SI F approx_log2(F x) {
  863. // e - 127 is a fair approximation of log2(x) in its own right...
  864. F e = cast(bit_cast<U32>(x)) * (1.0f / (1<<23));
  865. // ... but using the mantissa to refine its error is _much_ better.
  866. F m = bit_cast<F>((bit_cast<U32>(x) & 0x007fffff) | 0x3f000000);
  867. return e
  868. - 124.225514990f
  869. - 1.498030302f * m
  870. - 1.725879990f / (0.3520887068f + m);
  871. }
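  // Rough worked example: for x = 8.0f the bits are 0x41000000, so e = 130.0 and the
  // refined mantissa term is m = 0.5; plugging in gives about 130 - 124.2255 - 0.749 - 2.025,
  // i.e. approx_log2(8) ~= 3.0 == log2(8).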
  872. SI F approx_pow2(F x) {
  873. F f = fract(x);
  874. return bit_cast<F>(round(1.0f * (1<<23),
  875. x + 121.274057500f
  876. - 1.490129070f * f
  877. + 27.728023300f / (4.84252568f - f)));
  878. }
  879. SI F approx_powf(F x, F y) {
  880. #if defined(SK_LEGACY_APPROX_POWF_SPECIALCASE)
  881. return if_then_else((x == 0) , 0
  882. #else
  883. return if_then_else((x == 0)|(x == 1), x
  884. #endif
  885. , approx_pow2(approx_log2(x) * y));
  886. }
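  // So approx_powf(x,y) ~= 2^(y * log2(x)), with x == 0 (and, unless the legacy special-case
  // is enabled, x == 1) handled up front so those inputs come out exact.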
  887. SI F from_half(U16 h) {
  888. #if defined(JUMPER_IS_NEON) && defined(SK_CPU_ARM64) \
  889. && !defined(SK_BUILD_FOR_GOOGLE3) // Temporary workaround for some Google3 builds.
  890. return vcvt_f32_f16(h);
  891. #elif defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
  892. return _mm256_cvtph_ps(h);
  893. #else
  894. // Remember, a half is 1-5-10 (sign-exponent-mantissa) with 15 exponent bias.
  895. U32 sem = expand(h),
  896. s = sem & 0x8000,
  897. em = sem ^ s;
  898. // Convert to 1-8-23 float with 127 bias, flushing denormal halves (including zero) to zero.
  899. auto denorm = (I32)em < 0x0400; // I32 comparison is often quicker, and always safe here.
  900. return if_then_else(denorm, F(0)
  901. , bit_cast<F>( (s<<16) + (em<<13) + ((127-15)<<23) ));
  902. #endif
  903. }
  904. SI U16 to_half(F f) {
  905. #if defined(JUMPER_IS_NEON) && defined(SK_CPU_ARM64) \
  906. && !defined(SK_BUILD_FOR_GOOGLE3) // Temporary workaround for some Google3 builds.
  907. return vcvt_f16_f32(f);
  908. #elif defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
  909. return _mm256_cvtps_ph(f, _MM_FROUND_CUR_DIRECTION);
  910. #else
  911. // Remember, a float is 1-8-23 (sign-exponent-mantissa) with 127 exponent bias.
  912. U32 sem = bit_cast<U32>(f),
  913. s = sem & 0x80000000,
  914. em = sem ^ s;
  915. // Convert to 1-5-10 half with 15 bias, flushing denormal halves (including zero) to zero.
  916. auto denorm = (I32)em < 0x38800000; // I32 comparison is often quicker, and always safe here.
  917. return pack(if_then_else(denorm, U32(0)
  918. , (s>>16) + (em>>13) - ((127-15)<<10)));
  919. #endif
  920. }
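  // Worked example for the software path of to_half(): 1.0f is 0x3f800000, so s = 0 and
  // em = 0x3f800000 (not denormal); (em>>13) - ((127-15)<<10) = 0x1fc00 - 0x1c000 = 0x3c00,
  // which is exactly the half-precision encoding of 1.0.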
  921. // Our fundamental vector depth is our pixel stride.
  922. static const size_t N = sizeof(F) / sizeof(float);
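  // Concretely: N == 1 in the scalar build, 4 for the NEON/SSE2/SSE4.1 paths, and 8 for the
  // AVX/HSW/AVX512 paths above (which all use 8-wide vectors here).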
  923. // We're finally going to get to what a Stage function looks like!
  924. // tail == 0 ~~> work on a full N pixels
  925. // tail != 0 ~~> work on only the first tail pixels
  926. // tail is always < N.
  927. // Any custom ABI to use for all (non-externally-facing) stage functions?
  928. // Also decide here whether to use narrow (compromise) or wide (ideal) stages.
  929. #if defined(SK_CPU_ARM32) && defined(JUMPER_IS_NEON)
  930. // This lets us pass vectors more efficiently on 32-bit ARM.
  931. // We can still only pass 16 floats, so best as 4x {r,g,b,a}.
  932. #define ABI __attribute__((pcs("aapcs-vfp")))
  933. #define JUMPER_NARROW_STAGES 1
  934. #elif 0 && defined(_MSC_VER) && defined(__clang__) && defined(__x86_64__)
  935. // SysV ABI makes it very sensible to use wide stages with clang-cl.
  936. // TODO: crashes during compilation :(
  937. #define ABI __attribute__((sysv_abi))
  938. #define JUMPER_NARROW_STAGES 0
  939. #elif defined(_MSC_VER)
  940. // Even if not vectorized, this lets us pass {r,g,b,a} as registers,
  941. // instead of {b,a} on the stack. Narrow stages work best for __vectorcall.
  942. #define ABI __vectorcall
  943. #define JUMPER_NARROW_STAGES 1
  944. #elif defined(__x86_64__) || defined(SK_CPU_ARM64)
  945. // These platforms are ideal for wider stages, and their default ABI is ideal.
  946. #define ABI
  947. #define JUMPER_NARROW_STAGES 0
  948. #else
  949. // 32-bit or unknown... shunt them down the narrow path.
  950. // Odds are these have few registers and are better off there.
  951. #define ABI
  952. #define JUMPER_NARROW_STAGES 1
  953. #endif
  954. #if JUMPER_NARROW_STAGES
  955. struct Params {
  956. size_t dx, dy, tail;
  957. F dr,dg,db,da;
  958. };
  959. using Stage = void(ABI*)(Params*, void** program, F r, F g, F b, F a);
  960. #else
  961. // We keep program the second argument, so that it's passed in rsi for load_and_inc().
  962. using Stage = void(ABI*)(size_t tail, void** program, size_t dx, size_t dy, F,F,F,F, F,F,F,F);
  963. #endif
  964. static void start_pipeline(size_t dx, size_t dy, size_t xlimit, size_t ylimit, void** program) {
  965. auto start = (Stage)load_and_inc(program);
  966. const size_t x0 = dx;
  967. for (; dy < ylimit; dy++) {
  968. #if JUMPER_NARROW_STAGES
  969. Params params = { x0,dy,0, 0,0,0,0 };
  970. while (params.dx + N <= xlimit) {
  971. start(&params,program, 0,0,0,0);
  972. params.dx += N;
  973. }
  974. if (size_t tail = xlimit - params.dx) {
  975. params.tail = tail;
  976. start(&params,program, 0,0,0,0);
  977. }
  978. #else
  979. dx = x0;
  980. while (dx + N <= xlimit) {
  981. start(0,program,dx,dy, 0,0,0,0, 0,0,0,0);
  982. dx += N;
  983. }
  984. if (size_t tail = xlimit - dx) {
  985. start(tail,program,dx,dy, 0,0,0,0, 0,0,0,0);
  986. }
  987. #endif
  988. }
  989. }
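// For example, with N == 8 and xlimit - dx == 10, the loop above runs the stages once at
// full width (tail == 0, covering pixels 0..7) and once more with tail == 2 for pixels 8..9.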
  990. #if JUMPER_NARROW_STAGES
  991. #define STAGE(name, ...) \
  992. SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, \
  993. F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da); \
  994. static void ABI name(Params* params, void** program, \
  995. F r, F g, F b, F a) { \
  996. name##_k(Ctx{program},params->dx,params->dy,params->tail, r,g,b,a, \
  997. params->dr, params->dg, params->db, params->da); \
  998. auto next = (Stage)load_and_inc(program); \
  999. next(params,program, r,g,b,a); \
  1000. } \
  1001. SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, \
  1002. F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da)
  1003. #else
  1004. #define STAGE(name, ...) \
  1005. SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, \
  1006. F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da); \
  1007. static void ABI name(size_t tail, void** program, size_t dx, size_t dy, \
  1008. F r, F g, F b, F a, F dr, F dg, F db, F da) { \
  1009. name##_k(Ctx{program},dx,dy,tail, r,g,b,a, dr,dg,db,da); \
  1010. auto next = (Stage)load_and_inc(program); \
  1011. next(tail,program,dx,dy, r,g,b,a, dr,dg,db,da); \
  1012. } \
  1013. SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, \
  1014. F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da)
  1015. #endif
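// Each STAGE(name, ...) above expands to a kernel name##_k() holding the stage body, plus an
// ABI wrapper name() that unpacks its arguments, runs the kernel, and then calls the next stage
// loaded from program (in narrow mode dr,dg,db,da travel in Params; in wide mode, in registers).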
  1016. // just_return() is a simple no-op stage that only exists to end the chain,
  1017. // returning back up to start_pipeline(), and from there to the caller.
  1018. #if JUMPER_NARROW_STAGES
  1019. static void ABI just_return(Params*, void**, F,F,F,F) {}
  1020. #else
  1021. static void ABI just_return(size_t, void**, size_t,size_t, F,F,F,F, F,F,F,F) {}
  1022. #endif
  1023. // We could start defining normal Stages now. But first, some helper functions.
  1024. // These load() and store() methods are tail-aware,
  1025. // but focus mainly on keeping the at-stride tail==0 case fast.
  1026. template <typename V, typename T>
  1027. SI V load(const T* src, size_t tail) {
  1028. #if !defined(JUMPER_IS_SCALAR)
  1029. __builtin_assume(tail < N);
  1030. if (__builtin_expect(tail, 0)) {
  1031. V v{}; // Any inactive lanes are zeroed.
  1032. switch (tail) {
  1033. case 7: v[6] = src[6];
  1034. case 6: v[5] = src[5];
  1035. case 5: v[4] = src[4];
  1036. case 4: memcpy(&v, src, 4*sizeof(T)); break;
  1037. case 3: v[2] = src[2];
  1038. case 2: memcpy(&v, src, 2*sizeof(T)); break;
  1039. case 1: memcpy(&v, src, 1*sizeof(T)); break;
  1040. }
  1041. return v;
  1042. }
  1043. #endif
  1044. return sk_unaligned_load<V>(src);
  1045. }
  1046. template <typename V, typename T>
  1047. SI void store(T* dst, V v, size_t tail) {
  1048. #if !defined(JUMPER_IS_SCALAR)
  1049. __builtin_assume(tail < N);
  1050. if (__builtin_expect(tail, 0)) {
  1051. switch (tail) {
  1052. case 7: dst[6] = v[6];
  1053. case 6: dst[5] = v[5];
  1054. case 5: dst[4] = v[4];
  1055. case 4: memcpy(dst, &v, 4*sizeof(T)); break;
  1056. case 3: dst[2] = v[2];
  1057. case 2: memcpy(dst, &v, 2*sizeof(T)); break;
  1058. case 1: memcpy(dst, &v, 1*sizeof(T)); break;
  1059. }
  1060. return;
  1061. }
  1062. #endif
  1063. sk_unaligned_store(dst, v);
  1064. }
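// For example, with tail == 3, load() assigns element 2 in case 3, falls through to case 2's
// memcpy of elements 0 and 1, and leaves the remaining lanes zeroed; store() writes back just
// those same three elements.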
  1065. SI F from_byte(U8 b) {
  1066. return cast(expand(b)) * (1/255.0f);
  1067. }
  1068. SI F from_short(U16 s) {
  1069. return cast(expand(s)) * (1/65535.0f);
  1070. }
  1071. SI void from_565(U16 _565, F* r, F* g, F* b) {
  1072. U32 wide = expand(_565);
  1073. *r = cast(wide & (31<<11)) * (1.0f / (31<<11));
  1074. *g = cast(wide & (63<< 5)) * (1.0f / (63<< 5));
  1075. *b = cast(wide & (31<< 0)) * (1.0f / (31<< 0));
  1076. }
  1077. SI void from_4444(U16 _4444, F* r, F* g, F* b, F* a) {
  1078. U32 wide = expand(_4444);
  1079. *r = cast(wide & (15<<12)) * (1.0f / (15<<12));
  1080. *g = cast(wide & (15<< 8)) * (1.0f / (15<< 8));
  1081. *b = cast(wide & (15<< 4)) * (1.0f / (15<< 4));
  1082. *a = cast(wide & (15<< 0)) * (1.0f / (15<< 0));
  1083. }
  1084. SI void from_8888(U32 _8888, F* r, F* g, F* b, F* a) {
  1085. *r = cast((_8888 ) & 0xff) * (1/255.0f);
  1086. *g = cast((_8888 >> 8) & 0xff) * (1/255.0f);
  1087. *b = cast((_8888 >> 16) & 0xff) * (1/255.0f);
  1088. *a = cast((_8888 >> 24) ) * (1/255.0f);
  1089. }
  1090. SI void from_88(U16 _88, F* r, F* g) {
  1091. U32 wide = expand(_88);
  1092. *r = cast((wide ) & 0xff) * (1/255.0f);
  1093. *g = cast((wide >> 8) & 0xff) * (1/255.0f);
  1094. }
  1095. SI void from_1010102(U32 rgba, F* r, F* g, F* b, F* a) {
  1096. *r = cast((rgba ) & 0x3ff) * (1/1023.0f);
  1097. *g = cast((rgba >> 10) & 0x3ff) * (1/1023.0f);
  1098. *b = cast((rgba >> 20) & 0x3ff) * (1/1023.0f);
  1099. *a = cast((rgba >> 30) ) * (1/ 3.0f);
  1100. }
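// Note: alpha in 1010102 has only 2 bits, so a here can only be 0, 1/3, 2/3, or 1.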
  1101. SI void from_1616(U32 _1616, F* r, F* g) {
  1102. *r = cast((_1616 ) & 0xffff) * (1/65535.0f);
  1103. *g = cast((_1616 >> 16) & 0xffff) * (1/65535.0f);
  1104. }
  1105. SI void from_16161616(U64 _16161616, F* r, F* g, F* b, F* a) {
  1106. *r = cast64((_16161616 ) & 0xffff) * (1/65535.0f);
  1107. *g = cast64((_16161616 >> 16) & 0xffff) * (1/65535.0f);
  1108. *b = cast64((_16161616 >> 32) & 0xffff) * (1/65535.0f);
  1109. *a = cast64((_16161616 >> 48) & 0xffff) * (1/65535.0f);
  1110. }
  1111. // Used by load_ and store_ stages to get to the right (dx,dy) starting point of contiguous memory.
  1112. template <typename T>
  1113. SI T* ptr_at_xy(const SkRasterPipeline_MemoryCtx* ctx, size_t dx, size_t dy) {
  1114. return (T*)ctx->pixels + dy*ctx->stride + dx;
  1115. }
  1116. // clamp v to [0,limit).
  1117. SI F clamp(F v, F limit) {
  1118. F inclusive = bit_cast<F>( bit_cast<U32>(limit) - 1 ); // Exclusive -> inclusive.
  1119. return min(max(0, v), inclusive);
  1120. }
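// Subtracting 1 from the bit pattern of a positive, finite limit gives the largest float below
// it (effectively nextafterf(limit, 0)), so the clamped result stays strictly less than limit.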
  1121. // Used by gather_ stages to calculate the base pointer and a vector of indices to load.
  1122. template <typename T>
  1123. SI U32 ix_and_ptr(T** ptr, const SkRasterPipeline_GatherCtx* ctx, F x, F y) {
  1124. x = clamp(x, ctx->width);
  1125. y = clamp(y, ctx->height);
  1126. *ptr = (const T*)ctx->pixels;
  1127. return trunc_(y)*ctx->stride + trunc_(x);
  1128. }
  1129. // We often have a nominally [0,1] float value we need to scale and convert to an integer,
  1130. // whether for a table lookup or to pack back down into bytes for storage.
  1131. //
  1132. // In practice, especially when dealing with interesting color spaces, that notionally
1133. // [0,1] float may fall outside the [0,1] range. Unorms cannot represent that, so we must clamp.
  1134. //
  1135. // You can adjust the expected input to [0,bias] by tweaking that parameter.
  1136. SI U32 to_unorm(F v, F scale, F bias = 1.0f) {
1137. // TODO: platform-specific implementations of to_unorm(), removing round() entirely?
  1138. // Any time we use round() we probably want to use to_unorm().
  1139. return round(min(max(0, v), bias), scale);
  1140. }
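// For example, to_unorm(v, 255) clamps v to [0,1] and rounds v*255, so 1.0f maps to 255 and
// any negative input maps to 0.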
  1141. SI I32 cond_to_mask(I32 cond) { return if_then_else(cond, I32(~0), I32(0)); }
  1142. // Now finally, normal Stages!
  1143. STAGE(seed_shader, Ctx::None) {
  1144. static const float iota[] = {
  1145. 0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f,
  1146. 8.5f, 9.5f,10.5f,11.5f,12.5f,13.5f,14.5f,15.5f,
  1147. };
  1148. // It's important for speed to explicitly cast(dx) and cast(dy),
  1149. // which has the effect of splatting them to vectors before converting to floats.
  1150. // On Intel this breaks a data dependency on previous loop iterations' registers.
  1151. r = cast(dx) + sk_unaligned_load<F>(iota);
  1152. g = cast(dy) + 0.5f;
  1153. b = 1.0f;
  1154. a = 0;
  1155. dr = dg = db = da = 0;
  1156. }
  1157. STAGE(dither, const float* rate) {
  1158. // Get [(dx,dy), (dx+1,dy), (dx+2,dy), ...] loaded up in integer vectors.
  1159. uint32_t iota[] = {0,1,2,3,4,5,6,7};
  1160. U32 X = dx + sk_unaligned_load<U32>(iota),
  1161. Y = dy;
  1162. // We're doing 8x8 ordered dithering, see https://en.wikipedia.org/wiki/Ordered_dithering.
  1163. // In this case n=8 and we're using the matrix that looks like 1/64 x [ 0 48 12 60 ... ].
  1164. // We only need X and X^Y from here on, so it's easier to just think of that as "Y".
  1165. Y ^= X;
  1166. // We'll mix the bottom 3 bits of each of X and Y to make 6 bits,
  1167. // for 2^6 == 64 == 8x8 matrix values. If X=abc and Y=def, we make fcebda.
  1168. U32 M = (Y & 1) << 5 | (X & 1) << 4
  1169. | (Y & 2) << 2 | (X & 2) << 1
  1170. | (Y & 4) >> 1 | (X & 4) >> 2;
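// For example, if X's low bits abc are 0b101 and Y's def are 0b011, M = fcebda = 0b111001 = 57.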
  1171. // Scale that dither to [0,1), then (-0.5,+0.5), here using 63/128 = 0.4921875 as 0.5-epsilon.
  1172. // We want to make sure our dither is less than 0.5 in either direction to keep exact values
  1173. // like 0 and 1 unchanged after rounding.
  1174. F dither = cast(M) * (2/128.0f) - (63/128.0f);
  1175. r += *rate*dither;
  1176. g += *rate*dither;
  1177. b += *rate*dither;
  1178. r = max(0, min(r, a));
  1179. g = max(0, min(g, a));
  1180. b = max(0, min(b, a));
  1181. }
  1182. // load 4 floats from memory, and splat them into r,g,b,a
  1183. STAGE(uniform_color, const SkRasterPipeline_UniformColorCtx* c) {
  1184. r = c->r;
  1185. g = c->g;
  1186. b = c->b;
  1187. a = c->a;
  1188. }
  1189. STAGE(unbounded_uniform_color, const SkRasterPipeline_UniformColorCtx* c) {
  1190. r = c->r;
  1191. g = c->g;
  1192. b = c->b;
  1193. a = c->a;
  1194. }
  1195. // splats opaque-black into r,g,b,a
  1196. STAGE(black_color, Ctx::None) {
  1197. r = g = b = 0.0f;
  1198. a = 1.0f;
  1199. }
  1200. STAGE(white_color, Ctx::None) {
  1201. r = g = b = a = 1.0f;
  1202. }
1203. // load registers r,g,b,a from context (mirrors store_src)
  1204. STAGE(load_src, const float* ptr) {
  1205. r = sk_unaligned_load<F>(ptr + 0*N);
  1206. g = sk_unaligned_load<F>(ptr + 1*N);
  1207. b = sk_unaligned_load<F>(ptr + 2*N);
  1208. a = sk_unaligned_load<F>(ptr + 3*N);
  1209. }
1210. // store registers r,g,b,a into context (mirrors load_src)
  1211. STAGE(store_src, float* ptr) {
  1212. sk_unaligned_store(ptr + 0*N, r);
  1213. sk_unaligned_store(ptr + 1*N, g);
  1214. sk_unaligned_store(ptr + 2*N, b);
  1215. sk_unaligned_store(ptr + 3*N, a);
  1216. }
  1217. // load registers dr,dg,db,da from context (mirrors store_dst)
  1218. STAGE(load_dst, const float* ptr) {
  1219. dr = sk_unaligned_load<F>(ptr + 0*N);
  1220. dg = sk_unaligned_load<F>(ptr + 1*N);
  1221. db = sk_unaligned_load<F>(ptr + 2*N);
  1222. da = sk_unaligned_load<F>(ptr + 3*N);
  1223. }
  1224. // store registers dr,dg,db,da into context (mirrors load_dst)
  1225. STAGE(store_dst, float* ptr) {
  1226. sk_unaligned_store(ptr + 0*N, dr);
  1227. sk_unaligned_store(ptr + 1*N, dg);
  1228. sk_unaligned_store(ptr + 2*N, db);
  1229. sk_unaligned_store(ptr + 3*N, da);
  1230. }
  1231. // Most blend modes apply the same logic to each channel.
  1232. #define BLEND_MODE(name) \
  1233. SI F name##_channel(F s, F d, F sa, F da); \
  1234. STAGE(name, Ctx::None) { \
  1235. r = name##_channel(r,dr,a,da); \
  1236. g = name##_channel(g,dg,a,da); \
  1237. b = name##_channel(b,db,a,da); \
  1238. a = name##_channel(a,da,a,da); \
  1239. } \
  1240. SI F name##_channel(F s, F d, F sa, F da)
  1241. SI F inv(F x) { return 1.0f - x; }
  1242. SI F two(F x) { return x + x; }
  1243. BLEND_MODE(clear) { return 0; }
  1244. BLEND_MODE(srcatop) { return s*da + d*inv(sa); }
  1245. BLEND_MODE(dstatop) { return d*sa + s*inv(da); }
  1246. BLEND_MODE(srcin) { return s * da; }
  1247. BLEND_MODE(dstin) { return d * sa; }
  1248. BLEND_MODE(srcout) { return s * inv(da); }
  1249. BLEND_MODE(dstout) { return d * inv(sa); }
  1250. BLEND_MODE(srcover) { return mad(d, inv(sa), s); }
  1251. BLEND_MODE(dstover) { return mad(s, inv(da), d); }
  1252. BLEND_MODE(modulate) { return s*d; }
  1253. BLEND_MODE(multiply) { return s*inv(da) + d*inv(sa) + s*d; }
  1254. BLEND_MODE(plus_) { return min(s + d, 1.0f); } // We can clamp to either 1 or sa.
  1255. BLEND_MODE(screen) { return s + d - s*d; }
  1256. BLEND_MODE(xor_) { return s*inv(da) + d*inv(sa); }
  1257. #undef BLEND_MODE
  1258. // Most other blend modes apply the same logic to colors, and srcover to alpha.
  1259. #define BLEND_MODE(name) \
  1260. SI F name##_channel(F s, F d, F sa, F da); \
  1261. STAGE(name, Ctx::None) { \
  1262. r = name##_channel(r,dr,a,da); \
  1263. g = name##_channel(g,dg,a,da); \
  1264. b = name##_channel(b,db,a,da); \
  1265. a = mad(da, inv(a), a); \
  1266. } \
  1267. SI F name##_channel(F s, F d, F sa, F da)
  1268. BLEND_MODE(darken) { return s + d - max(s*da, d*sa) ; }
  1269. BLEND_MODE(lighten) { return s + d - min(s*da, d*sa) ; }
  1270. BLEND_MODE(difference) { return s + d - two(min(s*da, d*sa)); }
  1271. BLEND_MODE(exclusion) { return s + d - two(s*d); }
  1272. BLEND_MODE(colorburn) {
  1273. return if_then_else(d == da, d + s*inv(da),
  1274. if_then_else(s == 0, /* s + */ d*inv(sa),
  1275. sa*(da - min(da, (da-d)*sa*rcp(s))) + s*inv(da) + d*inv(sa)));
  1276. }
  1277. BLEND_MODE(colordodge) {
  1278. return if_then_else(d == 0, /* d + */ s*inv(da),
  1279. if_then_else(s == sa, s + d*inv(sa),
  1280. sa*min(da, (d*sa)*rcp(sa - s)) + s*inv(da) + d*inv(sa)));
  1281. }
  1282. BLEND_MODE(hardlight) {
  1283. return s*inv(da) + d*inv(sa)
  1284. + if_then_else(two(s) <= sa, two(s*d), sa*da - two((da-d)*(sa-s)));
  1285. }
  1286. BLEND_MODE(overlay) {
  1287. return s*inv(da) + d*inv(sa)
  1288. + if_then_else(two(d) <= da, two(s*d), sa*da - two((da-d)*(sa-s)));
  1289. }
  1290. BLEND_MODE(softlight) {
  1291. F m = if_then_else(da > 0, d / da, 0),
  1292. s2 = two(s),
  1293. m4 = two(two(m));
  1294. // The logic forks three ways:
  1295. // 1. dark src?
  1296. // 2. light src, dark dst?
  1297. // 3. light src, light dst?
  1298. F darkSrc = d*(sa + (s2 - sa)*(1.0f - m)), // Used in case 1.
  1299. darkDst = (m4*m4 + m4)*(m - 1.0f) + 7.0f*m, // Used in case 2.
  1300. liteDst = rcp(rsqrt(m)) - m, // Used in case 3.
  1301. liteSrc = d*sa + da*(s2 - sa) * if_then_else(two(two(d)) <= da, darkDst, liteDst); // 2 or 3?
  1302. return s*inv(da) + d*inv(sa) + if_then_else(s2 <= sa, darkSrc, liteSrc); // 1 or (2 or 3)?
  1303. }
  1304. #undef BLEND_MODE
1305. // We're basing our implementation of non-separable blend modes on
1306. // https://www.w3.org/TR/compositing-1/#blendingnonseparable
1307. // and
1308. // https://www.khronos.org/registry/OpenGL/specs/es/3.2/es_spec_3.2.pdf.
1309. // They're equivalent, but the ES spec's math has been simplified further.
  1310. //
  1311. // Anything extra we add beyond that is to make the math work with premul inputs.
  1312. SI F max(F r, F g, F b) { return max(r, max(g, b)); }
  1313. SI F min(F r, F g, F b) { return min(r, min(g, b)); }
  1314. SI F sat(F r, F g, F b) { return max(r,g,b) - min(r,g,b); }
  1315. SI F lum(F r, F g, F b) { return r*0.30f + g*0.59f + b*0.11f; }
  1316. SI void set_sat(F* r, F* g, F* b, F s) {
  1317. F mn = min(*r,*g,*b),
  1318. mx = max(*r,*g,*b),
  1319. sat = mx - mn;
  1320. // Map min channel to 0, max channel to s, and scale the middle proportionally.
  1321. auto scale = [=](F c) {
  1322. return if_then_else(sat == 0, 0, (c - mn) * s / sat);
  1323. };
  1324. *r = scale(*r);
  1325. *g = scale(*g);
  1326. *b = scale(*b);
  1327. }
  1328. SI void set_lum(F* r, F* g, F* b, F l) {
  1329. F diff = l - lum(*r, *g, *b);
  1330. *r += diff;
  1331. *g += diff;
  1332. *b += diff;
  1333. }
  1334. SI void clip_color(F* r, F* g, F* b, F a) {
  1335. F mn = min(*r, *g, *b),
  1336. mx = max(*r, *g, *b),
  1337. l = lum(*r, *g, *b);
  1338. auto clip = [=](F c) {
  1339. c = if_then_else(mn >= 0, c, l + (c - l) * ( l) / (l - mn) );
  1340. c = if_then_else(mx > a, l + (c - l) * (a - l) / (mx - l), c);
  1341. c = max(c, 0); // Sometimes without this we may dip just a little negative.
  1342. return c;
  1343. };
  1344. *r = clip(*r);
  1345. *g = clip(*g);
  1346. *b = clip(*b);
  1347. }
  1348. STAGE(hue, Ctx::None) {
  1349. F R = r*a,
  1350. G = g*a,
  1351. B = b*a;
  1352. set_sat(&R, &G, &B, sat(dr,dg,db)*a);
  1353. set_lum(&R, &G, &B, lum(dr,dg,db)*a);
  1354. clip_color(&R,&G,&B, a*da);
  1355. r = r*inv(da) + dr*inv(a) + R;
  1356. g = g*inv(da) + dg*inv(a) + G;
  1357. b = b*inv(da) + db*inv(a) + B;
  1358. a = a + da - a*da;
  1359. }
  1360. STAGE(saturation, Ctx::None) {
  1361. F R = dr*a,
  1362. G = dg*a,
  1363. B = db*a;
  1364. set_sat(&R, &G, &B, sat( r, g, b)*da);
  1365. set_lum(&R, &G, &B, lum(dr,dg,db)* a); // (This is not redundant.)
  1366. clip_color(&R,&G,&B, a*da);
  1367. r = r*inv(da) + dr*inv(a) + R;
  1368. g = g*inv(da) + dg*inv(a) + G;
  1369. b = b*inv(da) + db*inv(a) + B;
  1370. a = a + da - a*da;
  1371. }
  1372. STAGE(color, Ctx::None) {
  1373. F R = r*da,
  1374. G = g*da,
  1375. B = b*da;
  1376. set_lum(&R, &G, &B, lum(dr,dg,db)*a);
  1377. clip_color(&R,&G,&B, a*da);
  1378. r = r*inv(da) + dr*inv(a) + R;
  1379. g = g*inv(da) + dg*inv(a) + G;
  1380. b = b*inv(da) + db*inv(a) + B;
  1381. a = a + da - a*da;
  1382. }
  1383. STAGE(luminosity, Ctx::None) {
  1384. F R = dr*a,
  1385. G = dg*a,
  1386. B = db*a;
  1387. set_lum(&R, &G, &B, lum(r,g,b)*da);
  1388. clip_color(&R,&G,&B, a*da);
  1389. r = r*inv(da) + dr*inv(a) + R;
  1390. g = g*inv(da) + dg*inv(a) + G;
  1391. b = b*inv(da) + db*inv(a) + B;
  1392. a = a + da - a*da;
  1393. }
  1394. STAGE(srcover_rgba_8888, const SkRasterPipeline_MemoryCtx* ctx) {
  1395. auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
  1396. U32 dst = load<U32>(ptr, tail);
  1397. dr = cast((dst ) & 0xff);
  1398. dg = cast((dst >> 8) & 0xff);
  1399. db = cast((dst >> 16) & 0xff);
  1400. da = cast((dst >> 24) );
  1401. // {dr,dg,db,da} are in [0,255]
  1402. // { r, g, b, a} are in [0, 1] (but may be out of gamut)
  1403. r = mad(dr, inv(a), r*255.0f);
  1404. g = mad(dg, inv(a), g*255.0f);
  1405. b = mad(db, inv(a), b*255.0f);
  1406. a = mad(da, inv(a), a*255.0f);
  1407. // { r, g, b, a} are now in [0,255] (but may be out of gamut)
  1408. // to_unorm() clamps back to gamut. Scaling by 1 since we're already 255-biased.
  1409. dst = to_unorm(r, 1, 255)
  1410. | to_unorm(g, 1, 255) << 8
  1411. | to_unorm(b, 1, 255) << 16
  1412. | to_unorm(a, 1, 255) << 24;
  1413. store(ptr, dst, tail);
  1414. }
  1415. STAGE(clamp_0, Ctx::None) {
  1416. r = max(r, 0);
  1417. g = max(g, 0);
  1418. b = max(b, 0);
  1419. a = max(a, 0);
  1420. }
  1421. STAGE(clamp_1, Ctx::None) {
  1422. r = min(r, 1.0f);
  1423. g = min(g, 1.0f);
  1424. b = min(b, 1.0f);
  1425. a = min(a, 1.0f);
  1426. }
  1427. STAGE(clamp_a, Ctx::None) {
  1428. a = min(a, 1.0f);
  1429. r = min(r, a);
  1430. g = min(g, a);
  1431. b = min(b, a);
  1432. }
  1433. STAGE(clamp_gamut, Ctx::None) {
  1434. // If you're using this stage, a should already be in [0,1].
  1435. r = min(max(r, 0), a);
  1436. g = min(max(g, 0), a);
  1437. b = min(max(b, 0), a);
  1438. }
  1439. STAGE(set_rgb, const float* rgb) {
  1440. r = rgb[0];
  1441. g = rgb[1];
  1442. b = rgb[2];
  1443. }
  1444. STAGE(unbounded_set_rgb, const float* rgb) {
  1445. r = rgb[0];
  1446. g = rgb[1];
  1447. b = rgb[2];
  1448. }
  1449. STAGE(swap_rb, Ctx::None) {
  1450. auto tmp = r;
  1451. r = b;
  1452. b = tmp;
  1453. }
  1454. STAGE(swap_rb_dst, Ctx::None) {
  1455. auto tmp = dr;
  1456. dr = db;
  1457. db = tmp;
  1458. }
  1459. STAGE(move_src_dst, Ctx::None) {
  1460. dr = r;
  1461. dg = g;
  1462. db = b;
  1463. da = a;
  1464. }
  1465. STAGE(move_dst_src, Ctx::None) {
  1466. r = dr;
  1467. g = dg;
  1468. b = db;
  1469. a = da;
  1470. }
  1471. STAGE(premul, Ctx::None) {
  1472. r = r * a;
  1473. g = g * a;
  1474. b = b * a;
  1475. }
  1476. STAGE(premul_dst, Ctx::None) {
  1477. dr = dr * da;
  1478. dg = dg * da;
  1479. db = db * da;
  1480. }
  1481. STAGE(unpremul, Ctx::None) {
  1482. float inf = bit_cast<float>(0x7f800000);
  1483. auto scale = if_then_else(1.0f/a < inf, 1.0f/a, 0);
  1484. r *= scale;
  1485. g *= scale;
  1486. b *= scale;
  1487. }
  1488. STAGE(force_opaque , Ctx::None) { a = 1; }
  1489. STAGE(force_opaque_dst, Ctx::None) { da = 1; }
  1490. STAGE(rgb_to_hsl, Ctx::None) {
  1491. F mx = max(r,g,b),
  1492. mn = min(r,g,b),
  1493. d = mx - mn,
  1494. d_rcp = 1.0f / d;
  1495. F h = (1/6.0f) *
  1496. if_then_else(mx == mn, 0,
  1497. if_then_else(mx == r, (g-b)*d_rcp + if_then_else(g < b, 6.0f, 0),
  1498. if_then_else(mx == g, (b-r)*d_rcp + 2.0f,
  1499. (r-g)*d_rcp + 4.0f)));
  1500. F l = (mx + mn) * 0.5f;
  1501. F s = if_then_else(mx == mn, 0,
  1502. d / if_then_else(l > 0.5f, 2.0f-mx-mn, mx+mn));
  1503. r = h;
  1504. g = s;
  1505. b = l;
  1506. }
  1507. STAGE(hsl_to_rgb, Ctx::None) {
  1508. F h = r,
  1509. s = g,
  1510. l = b;
  1511. F q = l + if_then_else(l >= 0.5f, s - l*s, l*s),
  1512. p = 2.0f*l - q;
  1513. auto hue_to_rgb = [&](F t) {
  1514. t = fract(t);
  1515. F r = p;
  1516. r = if_then_else(t >= 4/6.0f, r, p + (q-p)*(4.0f - 6.0f*t));
  1517. r = if_then_else(t >= 3/6.0f, r, q);
  1518. r = if_then_else(t >= 1/6.0f, r, p + (q-p)*( 6.0f*t));
  1519. return r;
  1520. };
  1521. r = if_then_else(s == 0, l, hue_to_rgb(h + (1/3.0f)));
  1522. g = if_then_else(s == 0, l, hue_to_rgb(h ));
  1523. b = if_then_else(s == 0, l, hue_to_rgb(h - (1/3.0f)));
  1524. }
  1525. // Derive alpha's coverage from rgb coverage and the values of src and dst alpha.
  1526. SI F alpha_coverage_from_rgb_coverage(F a, F da, F cr, F cg, F cb) {
  1527. return if_then_else(a < da, min(cr,cg,cb)
  1528. , max(cr,cg,cb));
  1529. }
  1530. STAGE(scale_1_float, const float* c) {
  1531. r = r * *c;
  1532. g = g * *c;
  1533. b = b * *c;
  1534. a = a * *c;
  1535. }
  1536. STAGE(scale_u8, const SkRasterPipeline_MemoryCtx* ctx) {
  1537. auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
  1538. auto scales = load<U8>(ptr, tail);
  1539. auto c = from_byte(scales);
  1540. r = r * c;
  1541. g = g * c;
  1542. b = b * c;
  1543. a = a * c;
  1544. }
  1545. STAGE(scale_565, const SkRasterPipeline_MemoryCtx* ctx) {
  1546. auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
  1547. F cr,cg,cb;
  1548. from_565(load<U16>(ptr, tail), &cr, &cg, &cb);
  1549. F ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
  1550. r = r * cr;
  1551. g = g * cg;
  1552. b = b * cb;
  1553. a = a * ca;
  1554. }
  1555. SI F lerp(F from, F to, F t) {
  1556. return mad(to-from, t, from);
  1557. }
  1558. STAGE(lerp_1_float, const float* c) {
  1559. r = lerp(dr, r, *c);
  1560. g = lerp(dg, g, *c);
  1561. b = lerp(db, b, *c);
  1562. a = lerp(da, a, *c);
  1563. }
  1564. STAGE(lerp_native, const float scales[]) {
  1565. auto c = sk_unaligned_load<F>(scales);
  1566. r = lerp(dr, r, c);
  1567. g = lerp(dg, g, c);
  1568. b = lerp(db, b, c);
  1569. a = lerp(da, a, c);
  1570. }
  1571. STAGE(lerp_u8, const SkRasterPipeline_MemoryCtx* ctx) {
  1572. auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
  1573. auto scales = load<U8>(ptr, tail);
  1574. auto c = from_byte(scales);
  1575. r = lerp(dr, r, c);
  1576. g = lerp(dg, g, c);
  1577. b = lerp(db, b, c);
  1578. a = lerp(da, a, c);
  1579. }
  1580. STAGE(lerp_565, const SkRasterPipeline_MemoryCtx* ctx) {
  1581. auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
  1582. F cr,cg,cb;
  1583. from_565(load<U16>(ptr, tail), &cr, &cg, &cb);
  1584. F ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
  1585. r = lerp(dr, r, cr);
  1586. g = lerp(dg, g, cg);
  1587. b = lerp(db, b, cb);
  1588. a = lerp(da, a, ca);
  1589. }
  1590. STAGE(emboss, const SkRasterPipeline_EmbossCtx* ctx) {
  1591. auto mptr = ptr_at_xy<const uint8_t>(&ctx->mul, dx,dy),
  1592. aptr = ptr_at_xy<const uint8_t>(&ctx->add, dx,dy);
  1593. F mul = from_byte(load<U8>(mptr, tail)),
  1594. add = from_byte(load<U8>(aptr, tail));
  1595. r = mad(r, mul, add);
  1596. g = mad(g, mul, add);
  1597. b = mad(b, mul, add);
  1598. }
1599. STAGE(byte_tables, const void* ctx) { // TODO: rename Tables to SkRasterPipeline_ByteTablesCtx
  1600. struct Tables { const uint8_t *r, *g, *b, *a; };
  1601. auto tables = (const Tables*)ctx;
  1602. r = from_byte(gather(tables->r, to_unorm(r, 255)));
  1603. g = from_byte(gather(tables->g, to_unorm(g, 255)));
  1604. b = from_byte(gather(tables->b, to_unorm(b, 255)));
  1605. a = from_byte(gather(tables->a, to_unorm(a, 255)));
  1606. }
  1607. SI F strip_sign(F x, U32* sign) {
  1608. U32 bits = bit_cast<U32>(x);
  1609. *sign = bits & 0x80000000;
  1610. return bit_cast<F>(bits ^ *sign);
  1611. }
  1612. SI F apply_sign(F x, U32 sign) {
  1613. return bit_cast<F>(sign | bit_cast<U32>(x));
  1614. }
  1615. STAGE(parametric, const skcms_TransferFunction* ctx) {
  1616. auto fn = [&](F v) {
  1617. U32 sign;
  1618. v = strip_sign(v, &sign);
  1619. F r = if_then_else(v <= ctx->d, mad(ctx->c, v, ctx->f)
  1620. , approx_powf(mad(ctx->a, v, ctx->b), ctx->g) + ctx->e);
  1621. return apply_sign(r, sign);
  1622. };
  1623. r = fn(r);
  1624. g = fn(g);
  1625. b = fn(b);
  1626. }
  1627. STAGE(gamma_, const float* G) {
  1628. auto fn = [&](F v) {
  1629. U32 sign;
  1630. v = strip_sign(v, &sign);
  1631. return apply_sign(approx_powf(v, *G), sign);
  1632. };
  1633. r = fn(r);
  1634. g = fn(g);
  1635. b = fn(b);
  1636. }
  1637. STAGE(from_srgb, Ctx::None) {
  1638. auto fn = [](F s) {
  1639. U32 sign;
  1640. s = strip_sign(s, &sign);
  1641. auto lo = s * (1/12.92f);
  1642. auto hi = mad(s*s, mad(s, 0.3000f, 0.6975f), 0.0025f);
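// At s == 1 this is 1*(0.3000f + 0.6975f) + 0.0025f, i.e. ~1, so opaque white stays white
// (to within float rounding).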
  1643. return apply_sign(if_then_else(s < 0.055f, lo, hi), sign);
  1644. };
  1645. r = fn(r);
  1646. g = fn(g);
  1647. b = fn(b);
  1648. }
  1649. STAGE(to_srgb, Ctx::None) {
  1650. auto fn = [](F l) {
  1651. U32 sign;
  1652. l = strip_sign(l, &sign);
  1653. // We tweak c and d for each instruction set to make sure fn(1) is exactly 1.
  1654. #if defined(JUMPER_IS_AVX512)
  1655. const float c = 1.130026340485f,
  1656. d = 0.141387879848f;
  1657. #elif defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41) || \
  1658. defined(JUMPER_IS_AVX ) || defined(JUMPER_IS_HSW )
  1659. const float c = 1.130048394203f,
  1660. d = 0.141357362270f;
  1661. #elif defined(JUMPER_IS_NEON)
  1662. const float c = 1.129999995232f,
  1663. d = 0.141381442547f;
  1664. #else
  1665. const float c = 1.129999995232f,
  1666. d = 0.141377761960f;
  1667. #endif
  1668. F t = rsqrt(l);
  1669. auto lo = l * 12.92f;
  1670. auto hi = mad(t, mad(t, -0.0024542345f, 0.013832027f), c)
  1671. * rcp(d + t);
  1672. return apply_sign(if_then_else(l < 0.00465985f, lo, hi), sign);
  1673. };
  1674. r = fn(r);
  1675. g = fn(g);
  1676. b = fn(b);
  1677. }
  1678. STAGE(load_a8, const SkRasterPipeline_MemoryCtx* ctx) {
  1679. auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
  1680. r = g = b = 0.0f;
  1681. a = from_byte(load<U8>(ptr, tail));
  1682. }
  1683. STAGE(load_a8_dst, const SkRasterPipeline_MemoryCtx* ctx) {
  1684. auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
  1685. dr = dg = db = 0.0f;
  1686. da = from_byte(load<U8>(ptr, tail));
  1687. }
  1688. STAGE(gather_a8, const SkRasterPipeline_GatherCtx* ctx) {
  1689. const uint8_t* ptr;
  1690. U32 ix = ix_and_ptr(&ptr, ctx, r,g);
  1691. r = g = b = 0.0f;
  1692. a = from_byte(gather(ptr, ix));
  1693. }
  1694. STAGE(store_a8, const SkRasterPipeline_MemoryCtx* ctx) {
  1695. auto ptr = ptr_at_xy<uint8_t>(ctx, dx,dy);
  1696. U8 packed = pack(pack(to_unorm(a, 255)));
  1697. store(ptr, packed, tail);
  1698. }
  1699. STAGE(load_565, const SkRasterPipeline_MemoryCtx* ctx) {
  1700. auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
  1701. from_565(load<U16>(ptr, tail), &r,&g,&b);
  1702. a = 1.0f;
  1703. }
  1704. STAGE(load_565_dst, const SkRasterPipeline_MemoryCtx* ctx) {
  1705. auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
  1706. from_565(load<U16>(ptr, tail), &dr,&dg,&db);
  1707. da = 1.0f;
  1708. }
  1709. STAGE(gather_565, const SkRasterPipeline_GatherCtx* ctx) {
  1710. const uint16_t* ptr;
  1711. U32 ix = ix_and_ptr(&ptr, ctx, r,g);
  1712. from_565(gather(ptr, ix), &r,&g,&b);
  1713. a = 1.0f;
  1714. }
  1715. STAGE(store_565, const SkRasterPipeline_MemoryCtx* ctx) {
  1716. auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
  1717. U16 px = pack( to_unorm(r, 31) << 11
  1718. | to_unorm(g, 63) << 5
  1719. | to_unorm(b, 31) );
  1720. store(ptr, px, tail);
  1721. }
  1722. STAGE(load_4444, const SkRasterPipeline_MemoryCtx* ctx) {
  1723. auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
  1724. from_4444(load<U16>(ptr, tail), &r,&g,&b,&a);
  1725. }
  1726. STAGE(load_4444_dst, const SkRasterPipeline_MemoryCtx* ctx) {
  1727. auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
  1728. from_4444(load<U16>(ptr, tail), &dr,&dg,&db,&da);
  1729. }
  1730. STAGE(gather_4444, const SkRasterPipeline_GatherCtx* ctx) {
  1731. const uint16_t* ptr;
  1732. U32 ix = ix_and_ptr(&ptr, ctx, r,g);
  1733. from_4444(gather(ptr, ix), &r,&g,&b,&a);
  1734. }
  1735. STAGE(store_4444, const SkRasterPipeline_MemoryCtx* ctx) {
  1736. auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
  1737. U16 px = pack( to_unorm(r, 15) << 12
  1738. | to_unorm(g, 15) << 8
  1739. | to_unorm(b, 15) << 4
  1740. | to_unorm(a, 15) );
  1741. store(ptr, px, tail);
  1742. }
  1743. STAGE(load_8888, const SkRasterPipeline_MemoryCtx* ctx) {
  1744. auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
  1745. from_8888(load<U32>(ptr, tail), &r,&g,&b,&a);
  1746. }
  1747. STAGE(load_8888_dst, const SkRasterPipeline_MemoryCtx* ctx) {
  1748. auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
  1749. from_8888(load<U32>(ptr, tail), &dr,&dg,&db,&da);
  1750. }
  1751. STAGE(gather_8888, const SkRasterPipeline_GatherCtx* ctx) {
  1752. const uint32_t* ptr;
  1753. U32 ix = ix_and_ptr(&ptr, ctx, r,g);
  1754. from_8888(gather(ptr, ix), &r,&g,&b,&a);
  1755. }
  1756. STAGE(store_8888, const SkRasterPipeline_MemoryCtx* ctx) {
  1757. auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
  1758. U32 px = to_unorm(r, 255)
  1759. | to_unorm(g, 255) << 8
  1760. | to_unorm(b, 255) << 16
  1761. | to_unorm(a, 255) << 24;
  1762. store(ptr, px, tail);
  1763. }
  1764. STAGE(load_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
  1765. auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
  1766. b = 0;
  1767. a = 1;
  1768. from_88(load<U16>(ptr, tail), &r,&g);
  1769. }
  1770. STAGE(store_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
  1771. auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
  1772. U16 px = pack( to_unorm(r, 255)
  1773. | to_unorm(g, 255) << 8);
  1774. store(ptr, px, tail);
  1775. }
  1776. STAGE(load_a16, const SkRasterPipeline_MemoryCtx* ctx) {
  1777. auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
  1778. r = g = b = 0;
  1779. a = from_short(load<U16>(ptr, tail));
  1780. }
  1781. STAGE(store_a16, const SkRasterPipeline_MemoryCtx* ctx) {
  1782. auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
  1783. U16 px = pack(to_unorm(a, 65535));
  1784. store(ptr, px, tail);
  1785. }
  1786. STAGE(load_rg1616, const SkRasterPipeline_MemoryCtx* ctx) {
  1787. auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
  1788. b = 0; a = 1;
  1789. from_1616(load<U32>(ptr, tail), &r,&g);
  1790. }
  1791. STAGE(store_rg1616, const SkRasterPipeline_MemoryCtx* ctx) {
  1792. auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
  1793. U32 px = to_unorm(r, 65535)
  1794. | to_unorm(g, 65535) << 16;
  1795. store(ptr, px, tail);
  1796. }
  1797. STAGE(load_16161616, const SkRasterPipeline_MemoryCtx* ctx) {
  1798. auto ptr = ptr_at_xy<const uint64_t>(ctx, dx,dy);
  1799. from_16161616(load<U64>(ptr, tail), &r,&g, &b, &a);
  1800. }
  1801. STAGE(store_16161616, const SkRasterPipeline_MemoryCtx* ctx) {
  1802. auto ptr = ptr_at_xy<uint16_t>(ctx, 4*dx,4*dy);
  1803. U16 R = pack(to_unorm(r, 65535)),
  1804. G = pack(to_unorm(g, 65535)),
  1805. B = pack(to_unorm(b, 65535)),
  1806. A = pack(to_unorm(a, 65535));
  1807. store4(ptr,tail, R,G,B,A);
  1808. }
  1809. STAGE(load_1010102, const SkRasterPipeline_MemoryCtx* ctx) {
  1810. auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
  1811. from_1010102(load<U32>(ptr, tail), &r,&g,&b,&a);
  1812. }
  1813. STAGE(load_1010102_dst, const SkRasterPipeline_MemoryCtx* ctx) {
  1814. auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
  1815. from_1010102(load<U32>(ptr, tail), &dr,&dg,&db,&da);
  1816. }
  1817. STAGE(gather_1010102, const SkRasterPipeline_GatherCtx* ctx) {
  1818. const uint32_t* ptr;
  1819. U32 ix = ix_and_ptr(&ptr, ctx, r,g);
  1820. from_1010102(gather(ptr, ix), &r,&g,&b,&a);
  1821. }
  1822. STAGE(store_1010102, const SkRasterPipeline_MemoryCtx* ctx) {
  1823. auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
  1824. U32 px = to_unorm(r, 1023)
  1825. | to_unorm(g, 1023) << 10
  1826. | to_unorm(b, 1023) << 20
  1827. | to_unorm(a, 3) << 30;
  1828. store(ptr, px, tail);
  1829. }
  1830. STAGE(load_f16, const SkRasterPipeline_MemoryCtx* ctx) {
  1831. auto ptr = ptr_at_xy<const uint64_t>(ctx, dx,dy);
  1832. U16 R,G,B,A;
  1833. load4((const uint16_t*)ptr,tail, &R,&G,&B,&A);
  1834. r = from_half(R);
  1835. g = from_half(G);
  1836. b = from_half(B);
  1837. a = from_half(A);
  1838. }
  1839. STAGE(load_f16_dst, const SkRasterPipeline_MemoryCtx* ctx) {
  1840. auto ptr = ptr_at_xy<const uint64_t>(ctx, dx,dy);
  1841. U16 R,G,B,A;
  1842. load4((const uint16_t*)ptr,tail, &R,&G,&B,&A);
  1843. dr = from_half(R);
  1844. dg = from_half(G);
  1845. db = from_half(B);
  1846. da = from_half(A);
  1847. }
  1848. STAGE(gather_f16, const SkRasterPipeline_GatherCtx* ctx) {
  1849. const uint64_t* ptr;
  1850. U32 ix = ix_and_ptr(&ptr, ctx, r,g);
  1851. auto px = gather(ptr, ix);
  1852. U16 R,G,B,A;
  1853. load4((const uint16_t*)&px,0, &R,&G,&B,&A);
  1854. r = from_half(R);
  1855. g = from_half(G);
  1856. b = from_half(B);
  1857. a = from_half(A);
  1858. }
  1859. STAGE(store_f16, const SkRasterPipeline_MemoryCtx* ctx) {
  1860. auto ptr = ptr_at_xy<uint64_t>(ctx, dx,dy);
  1861. store4((uint16_t*)ptr,tail, to_half(r)
  1862. , to_half(g)
  1863. , to_half(b)
  1864. , to_half(a));
  1865. }
  1866. STAGE(store_u16_be, const SkRasterPipeline_MemoryCtx* ctx) {
  1867. auto ptr = ptr_at_xy<uint16_t>(ctx, 4*dx,dy);
  1868. U16 R = bswap(pack(to_unorm(r, 65535))),
  1869. G = bswap(pack(to_unorm(g, 65535))),
  1870. B = bswap(pack(to_unorm(b, 65535))),
  1871. A = bswap(pack(to_unorm(a, 65535)));
  1872. store4(ptr,tail, R,G,B,A);
  1873. }
  1874. STAGE(load_af16, const SkRasterPipeline_MemoryCtx* ctx) {
  1875. auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
  1876. U16 A = load<U16>((const uint16_t*)ptr, tail);
  1877. r = 0;
  1878. g = 0;
  1879. b = 0;
  1880. a = from_half(A);
  1881. }
  1882. STAGE(store_af16, const SkRasterPipeline_MemoryCtx* ctx) {
  1883. auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
  1884. store(ptr, to_half(a), tail);
  1885. }
  1886. STAGE(load_rgf16, const SkRasterPipeline_MemoryCtx* ctx) {
  1887. auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
  1888. U16 R,G;
  1889. load2((const uint16_t*)ptr,tail, &R,&G);
  1890. r = from_half(R);
  1891. g = from_half(G);
  1892. b = 0;
1893. a = from_half(0x3C00); // 0x3C00 is 1.0 in half-float.
  1894. }
  1895. STAGE(store_rgf16, const SkRasterPipeline_MemoryCtx* ctx) {
  1896. auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
  1897. store2((uint16_t*)ptr, tail, to_half(r)
  1898. , to_half(g));
  1899. }
  1900. STAGE(load_f32, const SkRasterPipeline_MemoryCtx* ctx) {
  1901. auto ptr = ptr_at_xy<const float>(ctx, 4*dx,4*dy);
  1902. load4(ptr,tail, &r,&g,&b,&a);
  1903. }
  1904. STAGE(load_f32_dst, const SkRasterPipeline_MemoryCtx* ctx) {
  1905. auto ptr = ptr_at_xy<const float>(ctx, 4*dx,4*dy);
  1906. load4(ptr,tail, &dr,&dg,&db,&da);
  1907. }
  1908. STAGE(gather_f32, const SkRasterPipeline_GatherCtx* ctx) {
  1909. const float* ptr;
  1910. U32 ix = ix_and_ptr(&ptr, ctx, r,g);
  1911. r = gather(ptr, 4*ix + 0);
  1912. g = gather(ptr, 4*ix + 1);
  1913. b = gather(ptr, 4*ix + 2);
  1914. a = gather(ptr, 4*ix + 3);
  1915. }
  1916. STAGE(store_f32, const SkRasterPipeline_MemoryCtx* ctx) {
  1917. auto ptr = ptr_at_xy<float>(ctx, 4*dx,4*dy);
  1918. store4(ptr,tail, r,g,b,a);
  1919. }
  1920. STAGE(load_rgf32, const SkRasterPipeline_MemoryCtx* ctx) {
  1921. auto ptr = ptr_at_xy<const float>(ctx, 2*dx,2*dy);
  1922. load2(ptr, tail, &r, &g);
  1923. b = 0;
  1924. a = 1;
  1925. }
  1926. STAGE(store_rgf32, const SkRasterPipeline_MemoryCtx* ctx) {
  1927. auto ptr = ptr_at_xy<float>(ctx, 2*dx,2*dy);
  1928. store2(ptr, tail, r, g);
  1929. }
  1930. SI F exclusive_repeat(F v, const SkRasterPipeline_TileCtx* ctx) {
  1931. return v - floor_(v*ctx->invScale)*ctx->scale;
  1932. }
  1933. SI F exclusive_mirror(F v, const SkRasterPipeline_TileCtx* ctx) {
  1934. auto limit = ctx->scale;
  1935. auto invLimit = ctx->invScale;
  1936. return abs_( (v-limit) - (limit+limit)*floor_((v-limit)*(invLimit*0.5f)) - limit );
  1937. }
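// For example, with scale == 4 (and invScale == 0.25f), exclusive_repeat() maps v == 5 to 1
// and exclusive_mirror() maps v == 5 to 3.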
  1938. // Tile x or y to [0,limit) == [0,limit - 1 ulp] (think, sampling from images).
  1939. // The gather stages will hard clamp the output of these stages to [0,limit)...
  1940. // we just need to do the basic repeat or mirroring.
  1941. STAGE(repeat_x, const SkRasterPipeline_TileCtx* ctx) { r = exclusive_repeat(r, ctx); }
  1942. STAGE(repeat_y, const SkRasterPipeline_TileCtx* ctx) { g = exclusive_repeat(g, ctx); }
  1943. STAGE(mirror_x, const SkRasterPipeline_TileCtx* ctx) { r = exclusive_mirror(r, ctx); }
  1944. STAGE(mirror_y, const SkRasterPipeline_TileCtx* ctx) { g = exclusive_mirror(g, ctx); }
  1945. // Clamp x to [0,1], both sides inclusive (think, gradients).
  1946. // Even repeat and mirror funnel through a clamp to handle bad inputs like +Inf, NaN.
  1947. SI F clamp_01(F v) { return min(max(0, v), 1); }
  1948. STAGE( clamp_x_1, Ctx::None) { r = clamp_01(r); }
  1949. STAGE(repeat_x_1, Ctx::None) { r = clamp_01(r - floor_(r)); }
  1950. STAGE(mirror_x_1, Ctx::None) { r = clamp_01(abs_( (r-1.0f) - two(floor_((r-1.0f)*0.5f)) - 1.0f )); }
  1951. // Decal stores a 32bit mask after checking the coordinate (x and/or y) against its domain:
  1952. // mask == 0x00000000 if the coordinate(s) are out of bounds
  1953. // mask == 0xFFFFFFFF if the coordinate(s) are in bounds
  1954. // After the gather stage, the r,g,b,a values are AND'd with this mask, setting them to 0
  1955. // if either of the coordinates were out of bounds.
  1956. STAGE(decal_x, SkRasterPipeline_DecalTileCtx* ctx) {
  1957. auto w = ctx->limit_x;
  1958. sk_unaligned_store(ctx->mask, cond_to_mask((0 <= r) & (r < w)));
  1959. }
  1960. STAGE(decal_y, SkRasterPipeline_DecalTileCtx* ctx) {
  1961. auto h = ctx->limit_y;
  1962. sk_unaligned_store(ctx->mask, cond_to_mask((0 <= g) & (g < h)));
  1963. }
  1964. STAGE(decal_x_and_y, SkRasterPipeline_DecalTileCtx* ctx) {
  1965. auto w = ctx->limit_x;
  1966. auto h = ctx->limit_y;
  1967. sk_unaligned_store(ctx->mask,
  1968. cond_to_mask((0 <= r) & (r < w) & (0 <= g) & (g < h)));
  1969. }
  1970. STAGE(check_decal_mask, SkRasterPipeline_DecalTileCtx* ctx) {
  1971. auto mask = sk_unaligned_load<U32>(ctx->mask);
  1972. r = bit_cast<F>( bit_cast<U32>(r) & mask );
  1973. g = bit_cast<F>( bit_cast<U32>(g) & mask );
  1974. b = bit_cast<F>( bit_cast<U32>(b) & mask );
  1975. a = bit_cast<F>( bit_cast<U32>(a) & mask );
  1976. }
  1977. STAGE(alpha_to_gray, Ctx::None) {
  1978. r = g = b = a;
  1979. a = 1;
  1980. }
  1981. STAGE(alpha_to_gray_dst, Ctx::None) {
  1982. dr = dg = db = da;
  1983. da = 1;
  1984. }
  1985. STAGE(bt709_luminance_or_luma_to_alpha, Ctx::None) {
  1986. a = r*0.2126f + g*0.7152f + b*0.0722f;
  1987. r = g = b = 0;
  1988. }
  1989. STAGE(matrix_translate, const float* m) {
  1990. r += m[0];
  1991. g += m[1];
  1992. }
  1993. STAGE(matrix_scale_translate, const float* m) {
  1994. r = mad(r,m[0], m[2]);
  1995. g = mad(g,m[1], m[3]);
  1996. }
  1997. STAGE(matrix_2x3, const float* m) {
  1998. auto R = mad(r,m[0], mad(g,m[2], m[4])),
  1999. G = mad(r,m[1], mad(g,m[3], m[5]));
  2000. r = R;
  2001. g = G;
  2002. }
  2003. STAGE(matrix_3x3, const float* m) {
  2004. auto R = mad(r,m[0], mad(g,m[3], b*m[6])),
  2005. G = mad(r,m[1], mad(g,m[4], b*m[7])),
  2006. B = mad(r,m[2], mad(g,m[5], b*m[8]));
  2007. r = R;
  2008. g = G;
  2009. b = B;
  2010. }
  2011. STAGE(matrix_3x4, const float* m) {
  2012. auto R = mad(r,m[0], mad(g,m[3], mad(b,m[6], m[ 9]))),
  2013. G = mad(r,m[1], mad(g,m[4], mad(b,m[7], m[10]))),
  2014. B = mad(r,m[2], mad(g,m[5], mad(b,m[8], m[11])));
  2015. r = R;
  2016. g = G;
  2017. b = B;
  2018. }
  2019. STAGE(matrix_4x5, const float* m) {
  2020. auto R = mad(r,m[ 0], mad(g,m[ 1], mad(b,m[ 2], mad(a,m[ 3], m[ 4])))),
  2021. G = mad(r,m[ 5], mad(g,m[ 6], mad(b,m[ 7], mad(a,m[ 8], m[ 9])))),
  2022. B = mad(r,m[10], mad(g,m[11], mad(b,m[12], mad(a,m[13], m[14])))),
  2023. A = mad(r,m[15], mad(g,m[16], mad(b,m[17], mad(a,m[18], m[19]))));
  2024. r = R;
  2025. g = G;
  2026. b = B;
  2027. a = A;
  2028. }
  2029. STAGE(matrix_4x3, const float* m) {
  2030. auto X = r,
  2031. Y = g;
  2032. r = mad(X, m[0], mad(Y, m[4], m[ 8]));
  2033. g = mad(X, m[1], mad(Y, m[5], m[ 9]));
  2034. b = mad(X, m[2], mad(Y, m[6], m[10]));
  2035. a = mad(X, m[3], mad(Y, m[7], m[11]));
  2036. }
  2037. STAGE(matrix_perspective, const float* m) {
  2038. // N.B. Unlike the other matrix_ stages, this matrix is row-major.
  2039. auto R = mad(r,m[0], mad(g,m[1], m[2])),
  2040. G = mad(r,m[3], mad(g,m[4], m[5])),
  2041. Z = mad(r,m[6], mad(g,m[7], m[8]));
  2042. r = R * rcp(Z);
  2043. g = G * rcp(Z);
  2044. }
  2045. SI void gradient_lookup(const SkRasterPipeline_GradientCtx* c, U32 idx, F t,
  2046. F* r, F* g, F* b, F* a) {
  2047. F fr, br, fg, bg, fb, bb, fa, ba;
  2048. #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
  2049. if (c->stopCount <=8) {
  2050. fr = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[0]), idx);
  2051. br = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[0]), idx);
  2052. fg = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[1]), idx);
  2053. bg = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[1]), idx);
  2054. fb = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[2]), idx);
  2055. bb = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[2]), idx);
  2056. fa = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[3]), idx);
  2057. ba = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[3]), idx);
  2058. } else
  2059. #endif
  2060. {
  2061. fr = gather(c->fs[0], idx);
  2062. br = gather(c->bs[0], idx);
  2063. fg = gather(c->fs[1], idx);
  2064. bg = gather(c->bs[1], idx);
  2065. fb = gather(c->fs[2], idx);
  2066. bb = gather(c->bs[2], idx);
  2067. fa = gather(c->fs[3], idx);
  2068. ba = gather(c->bs[3], idx);
  2069. }
  2070. *r = mad(t, fr, br);
  2071. *g = mad(t, fg, bg);
  2072. *b = mad(t, fb, bb);
  2073. *a = mad(t, fa, ba);
  2074. }
  2075. STAGE(evenly_spaced_gradient, const SkRasterPipeline_GradientCtx* c) {
  2076. auto t = r;
  2077. auto idx = trunc_(t * (c->stopCount-1));
  2078. gradient_lookup(c, idx, t, &r, &g, &b, &a);
  2079. }
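// For example, with 5 evenly spaced stops and t == 0.3, idx == trunc(0.3f * 4) == 1.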
  2080. STAGE(gradient, const SkRasterPipeline_GradientCtx* c) {
  2081. auto t = r;
  2082. U32 idx = 0;
  2083. // N.B. The loop starts at 1 because idx 0 is the color to use before the first stop.
  2084. for (size_t i = 1; i < c->stopCount; i++) {
  2085. idx += if_then_else(t >= c->ts[i], U32(1), U32(0));
  2086. }
  2087. gradient_lookup(c, idx, t, &r, &g, &b, &a);
  2088. }
  2089. STAGE(evenly_spaced_2_stop_gradient, const void* ctx) {
2090. // TODO: Rename Ctx to SkRasterPipeline_EvenlySpaced2StopGradientCtx.
  2091. struct Ctx { float f[4], b[4]; };
  2092. auto c = (const Ctx*)ctx;
  2093. auto t = r;
  2094. r = mad(t, c->f[0], c->b[0]);
  2095. g = mad(t, c->f[1], c->b[1]);
  2096. b = mad(t, c->f[2], c->b[2]);
  2097. a = mad(t, c->f[3], c->b[3]);
  2098. }
  2099. STAGE(xy_to_unit_angle, Ctx::None) {
  2100. F X = r,
  2101. Y = g;
  2102. F xabs = abs_(X),
  2103. yabs = abs_(Y);
  2104. F slope = min(xabs, yabs)/max(xabs, yabs);
  2105. F s = slope * slope;
  2106. // Use a 7th degree polynomial to approximate atan.
  2107. // This was generated using sollya.gforge.inria.fr.
  2108. // A float optimized polynomial was generated using the following command.
  2109. // P1 = fpminimax((1/(2*Pi))*atan(x),[|1,3,5,7|],[|24...|],[2^(-40),1],relative);
  2110. F phi = slope
  2111. * (0.15912117063999176025390625f + s
  2112. * (-5.185396969318389892578125e-2f + s
  2113. * (2.476101927459239959716796875e-2f + s
  2114. * (-7.0547382347285747528076171875e-3f))));
  2115. phi = if_then_else(xabs < yabs, 1.0f/4.0f - phi, phi);
  2116. phi = if_then_else(X < 0.0f , 1.0f/2.0f - phi, phi);
  2117. phi = if_then_else(Y < 0.0f , 1.0f - phi , phi);
  2118. phi = if_then_else(phi != phi , 0 , phi); // Check for NaN.
  2119. r = phi;
  2120. }
  2121. STAGE(xy_to_radius, Ctx::None) {
  2122. F X2 = r * r,
  2123. Y2 = g * g;
  2124. r = sqrt_(X2 + Y2);
  2125. }
  2126. // Please see https://skia.org/dev/design/conical for how our 2pt conical shader works.
  2127. STAGE(negate_x, Ctx::None) { r = -r; }
  2128. STAGE(xy_to_2pt_conical_strip, const SkRasterPipeline_2PtConicalCtx* ctx) {
  2129. F x = r, y = g, &t = r;
  2130. t = x + sqrt_(ctx->fP0 - y*y); // ctx->fP0 = r0 * r0
  2131. }
  2132. STAGE(xy_to_2pt_conical_focal_on_circle, Ctx::None) {
  2133. F x = r, y = g, &t = r;
  2134. t = x + y*y / x; // (x^2 + y^2) / x
  2135. }
  2136. STAGE(xy_to_2pt_conical_well_behaved, const SkRasterPipeline_2PtConicalCtx* ctx) {
  2137. F x = r, y = g, &t = r;
  2138. t = sqrt_(x*x + y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
  2139. }
  2140. STAGE(xy_to_2pt_conical_greater, const SkRasterPipeline_2PtConicalCtx* ctx) {
  2141. F x = r, y = g, &t = r;
  2142. t = sqrt_(x*x - y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
  2143. }
  2144. STAGE(xy_to_2pt_conical_smaller, const SkRasterPipeline_2PtConicalCtx* ctx) {
  2145. F x = r, y = g, &t = r;
  2146. t = -sqrt_(x*x - y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
  2147. }
  2148. STAGE(alter_2pt_conical_compensate_focal, const SkRasterPipeline_2PtConicalCtx* ctx) {
  2149. F& t = r;
  2150. t = t + ctx->fP1; // ctx->fP1 = f
  2151. }
  2152. STAGE(alter_2pt_conical_unswap, Ctx::None) {
  2153. F& t = r;
  2154. t = 1 - t;
  2155. }
  2156. STAGE(mask_2pt_conical_nan, SkRasterPipeline_2PtConicalCtx* c) {
  2157. F& t = r;
  2158. auto is_degenerate = (t != t); // NaN
  2159. t = if_then_else(is_degenerate, F(0), t);
  2160. sk_unaligned_store(&c->fMask, cond_to_mask(!is_degenerate));
  2161. }
  2162. STAGE(mask_2pt_conical_degenerates, SkRasterPipeline_2PtConicalCtx* c) {
  2163. F& t = r;
  2164. auto is_degenerate = (t <= 0) | (t != t);
  2165. t = if_then_else(is_degenerate, F(0), t);
  2166. sk_unaligned_store(&c->fMask, cond_to_mask(!is_degenerate));
  2167. }
  2168. STAGE(apply_vector_mask, const uint32_t* ctx) {
  2169. const U32 mask = sk_unaligned_load<U32>(ctx);
  2170. r = bit_cast<F>(bit_cast<U32>(r) & mask);
  2171. g = bit_cast<F>(bit_cast<U32>(g) & mask);
  2172. b = bit_cast<F>(bit_cast<U32>(b) & mask);
  2173. a = bit_cast<F>(bit_cast<U32>(a) & mask);
  2174. }
  2175. STAGE(save_xy, SkRasterPipeline_SamplerCtx* c) {
  2176. // Whether bilinear or bicubic, all sample points are at the same fractional offset (fx,fy).
  2177. // They're either the 4 corners of a logical 1x1 pixel or the 16 corners of a 3x3 grid
  2178. // surrounding (x,y) at (0.5,0.5) off-center.
  2179. F fx = fract(r + 0.5f),
  2180. fy = fract(g + 0.5f);
  2181. // Samplers will need to load x and fx, or y and fy.
  2182. sk_unaligned_store(c->x, r);
  2183. sk_unaligned_store(c->y, g);
  2184. sk_unaligned_store(c->fx, fx);
  2185. sk_unaligned_store(c->fy, fy);
  2186. }
  2187. STAGE(accumulate, const SkRasterPipeline_SamplerCtx* c) {
  2188. // Bilinear and bicubic filters are both separable, so we produce independent contributions
  2189. // from x and y, multiplying them together here to get each pixel's total scale factor.
  2190. auto scale = sk_unaligned_load<F>(c->scalex)
  2191. * sk_unaligned_load<F>(c->scaley);
  2192. dr = mad(scale, r, dr);
  2193. dg = mad(scale, g, dg);
  2194. db = mad(scale, b, db);
  2195. da = mad(scale, a, da);
  2196. }
  2197. // In bilinear interpolation, the 4 pixels at +/- 0.5 offsets from the sample pixel center
  2198. // are combined in direct proportion to their area overlapping that logical query pixel.
  2199. // At positive offsets, the x-axis contribution to that rectangle is fx, or (1-fx) at negative x.
  2200. // The y-axis is symmetric.
  2201. template <int kScale>
  2202. SI void bilinear_x(SkRasterPipeline_SamplerCtx* ctx, F* x) {
  2203. *x = sk_unaligned_load<F>(ctx->x) + (kScale * 0.5f);
  2204. F fx = sk_unaligned_load<F>(ctx->fx);
  2205. F scalex;
  2206. if (kScale == -1) { scalex = 1.0f - fx; }
  2207. if (kScale == +1) { scalex = fx; }
  2208. sk_unaligned_store(ctx->scalex, scalex);
  2209. }
  2210. template <int kScale>
  2211. SI void bilinear_y(SkRasterPipeline_SamplerCtx* ctx, F* y) {
  2212. *y = sk_unaligned_load<F>(ctx->y) + (kScale * 0.5f);
  2213. F fy = sk_unaligned_load<F>(ctx->fy);
  2214. F scaley;
  2215. if (kScale == -1) { scaley = 1.0f - fy; }
  2216. if (kScale == +1) { scaley = fy; }
  2217. sk_unaligned_store(ctx->scaley, scaley);
  2218. }
  2219. STAGE(bilinear_nx, SkRasterPipeline_SamplerCtx* ctx) { bilinear_x<-1>(ctx, &r); }
  2220. STAGE(bilinear_px, SkRasterPipeline_SamplerCtx* ctx) { bilinear_x<+1>(ctx, &r); }
  2221. STAGE(bilinear_ny, SkRasterPipeline_SamplerCtx* ctx) { bilinear_y<-1>(ctx, &g); }
  2222. STAGE(bilinear_py, SkRasterPipeline_SamplerCtx* ctx) { bilinear_y<+1>(ctx, &g); }
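// For example, with fx == 0.25 the -x sample gets scalex 0.75 and the +x sample gets 0.25;
// combined with the matching y weights, the four samples' area weights sum to 1.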
  2223. // In bicubic interpolation, the 16 pixels and +/- 0.5 and +/- 1.5 offsets from the sample
  2224. // pixel center are combined with a non-uniform cubic filter, with higher values near the center.
  2225. //
  2226. // We break this function into two parts, one for near 0.5 offsets and one for far 1.5 offsets.
  2227. // See GrCubicEffect for details of this particular filter.
  2228. SI F bicubic_near(F t) {
  2229. // 1/18 + 9/18t + 27/18t^2 - 21/18t^3 == t ( t ( -21/18t + 27/18) + 9/18) + 1/18
  2230. return mad(t, mad(t, mad((-21/18.0f), t, (27/18.0f)), (9/18.0f)), (1/18.0f));
  2231. }
  2232. SI F bicubic_far(F t) {
  2233. // 0/18 + 0/18*t - 6/18t^2 + 7/18t^3 == t^2 (7/18t - 6/18)
  2234. return (t*t)*mad((7/18.0f), t, (-6/18.0f));
  2235. }
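// Conveniently, bicubic_far(1-t) + bicubic_near(1-t) + bicubic_near(t) + bicubic_far(t) == 1
// for any t (up to rounding), so the 16 combined x*y weights also sum to 1.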
  2236. template <int kScale>
  2237. SI void bicubic_x(SkRasterPipeline_SamplerCtx* ctx, F* x) {
  2238. *x = sk_unaligned_load<F>(ctx->x) + (kScale * 0.5f);
  2239. F fx = sk_unaligned_load<F>(ctx->fx);
  2240. F scalex;
  2241. if (kScale == -3) { scalex = bicubic_far (1.0f - fx); }
  2242. if (kScale == -1) { scalex = bicubic_near(1.0f - fx); }
  2243. if (kScale == +1) { scalex = bicubic_near( fx); }
  2244. if (kScale == +3) { scalex = bicubic_far ( fx); }
  2245. sk_unaligned_store(ctx->scalex, scalex);
  2246. }
  2247. template <int kScale>
  2248. SI void bicubic_y(SkRasterPipeline_SamplerCtx* ctx, F* y) {
  2249. *y = sk_unaligned_load<F>(ctx->y) + (kScale * 0.5f);
  2250. F fy = sk_unaligned_load<F>(ctx->fy);
  2251. F scaley;
  2252. if (kScale == -3) { scaley = bicubic_far (1.0f - fy); }
  2253. if (kScale == -1) { scaley = bicubic_near(1.0f - fy); }
  2254. if (kScale == +1) { scaley = bicubic_near( fy); }
  2255. if (kScale == +3) { scaley = bicubic_far ( fy); }
  2256. sk_unaligned_store(ctx->scaley, scaley);
  2257. }
  2258. STAGE(bicubic_n3x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<-3>(ctx, &r); }
  2259. STAGE(bicubic_n1x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<-1>(ctx, &r); }
  2260. STAGE(bicubic_p1x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<+1>(ctx, &r); }
  2261. STAGE(bicubic_p3x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<+3>(ctx, &r); }
  2262. STAGE(bicubic_n3y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<-3>(ctx, &g); }
  2263. STAGE(bicubic_n1y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<-1>(ctx, &g); }
  2264. STAGE(bicubic_p1y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<+1>(ctx, &g); }
  2265. STAGE(bicubic_p3y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<+3>(ctx, &g); }
  2266. STAGE(callback, SkRasterPipeline_CallbackCtx* c) {
  2267. store4(c->rgba,0, r,g,b,a);
  2268. c->fn(c, tail ? tail : N);
  2269. load4(c->read_from,0, &r,&g,&b,&a);
  2270. }
  2271. // shader: void main(float x, float y, inout half4 color)
  2272. // colorfilter: void main(inout half4 color)
  2273. STAGE(interpreter, SkRasterPipeline_InterpreterCtx* c) {
  2274. // If N is less than the interpreter's VecWidth, then we are doing more work than necessary in
  2275. // the interpreter. This is a known issue, and will be addressed at some point.
  2276. float xx[N], yy[N],
  2277. rr[N], gg[N], bb[N], aa[N];
  2278. float* args[] = { xx, yy, rr, gg, bb, aa };
  2279. float** in_args = args;
  2280. int in_count = 6;
  2281. if (c->shaderConvention) {
  2282. // our caller must have called seed_shader to set these
  2283. sk_unaligned_store(xx, r);
  2284. sk_unaligned_store(yy, g);
  2285. sk_unaligned_store(rr, F(c->paintColor.fR));
  2286. sk_unaligned_store(gg, F(c->paintColor.fG));
  2287. sk_unaligned_store(bb, F(c->paintColor.fB));
  2288. sk_unaligned_store(aa, F(c->paintColor.fA));
  2289. } else {
  2290. in_args += 2; // skip x,y
  2291. in_count = 4;
  2292. sk_unaligned_store(rr, r);
  2293. sk_unaligned_store(gg, g);
  2294. sk_unaligned_store(bb, b);
  2295. sk_unaligned_store(aa, a);
  2296. }
  2297. SkAssertResult(c->byteCode->runStriped(c->fn, in_args, in_count, tail ? tail : N,
  2298. (const float*)c->inputs, c->ninputs, nullptr, 0));
  2299. r = sk_unaligned_load<F>(rr);
  2300. g = sk_unaligned_load<F>(gg);
  2301. b = sk_unaligned_load<F>(bb);
  2302. a = sk_unaligned_load<F>(aa);
  2303. }
  2304. STAGE(gauss_a_to_rgba, Ctx::None) {
  2305. // x = 1 - x;
  2306. // exp(-x * x * 4) - 0.018f;
  2307. // ... now approximate with quartic
  2308. //
  2309. const float c4 = -2.26661229133605957031f;
  2310. const float c3 = 2.89795351028442382812f;
  2311. const float c2 = 0.21345567703247070312f;
  2312. const float c1 = 0.15489584207534790039f;
  2313. const float c0 = 0.00030726194381713867f;
  2314. a = mad(a, mad(a, mad(a, mad(a, c4, c3), c2), c1), c0);
  2315. r = a;
  2316. g = a;
  2317. b = a;
  2318. }
  2319. // A specialized fused image shader for clamp-x, clamp-y, non-sRGB sampling.
  2320. STAGE(bilerp_clamp_8888, const SkRasterPipeline_GatherCtx* ctx) {
  2321. // (cx,cy) are the center of our sample.
  2322. F cx = r,
  2323. cy = g;
  2324. // All sample points are at the same fractional offset (fx,fy).
  2325. // They're the 4 corners of a logical 1x1 pixel surrounding (x,y) at (0.5,0.5) offsets.
  2326. F fx = fract(cx + 0.5f),
  2327. fy = fract(cy + 0.5f);
  2328. // We'll accumulate the color of all four samples into {r,g,b,a} directly.
  2329. r = g = b = a = 0;
  2330. for (float dy = -0.5f; dy <= +0.5f; dy += 1.0f)
  2331. for (float dx = -0.5f; dx <= +0.5f; dx += 1.0f) {
  2332. // (x,y) are the coordinates of this sample point.
  2333. F x = cx + dx,
  2334. y = cy + dy;
  2335. // ix_and_ptr() will clamp to the image's bounds for us.
  2336. const uint32_t* ptr;
  2337. U32 ix = ix_and_ptr(&ptr, ctx, x,y);
  2338. F sr,sg,sb,sa;
  2339. from_8888(gather(ptr, ix), &sr,&sg,&sb,&sa);
  2340. // In bilinear interpolation, the 4 pixels at +/- 0.5 offsets from the sample pixel center
  2341. // are combined in direct proportion to their area overlapping that logical query pixel.
  2342. // At positive offsets, the x-axis contribution to that rectangle is fx,
  2343. // or (1-fx) at negative x. Same deal for y.
  2344. F sx = (dx > 0) ? fx : 1.0f - fx,
  2345. sy = (dy > 0) ? fy : 1.0f - fy,
  2346. area = sx * sy;
  2347. r += sr * area;
  2348. g += sg * area;
  2349. b += sb * area;
  2350. a += sa * area;
  2351. }
  2352. }
  2353. // ~~~~~~ GrSwizzle stage ~~~~~~ //
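// The four swizzle characters are packed into the context pointer value itself; the memcpy
// below copies the pointer's own low bytes, not memory it points to.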
  2354. STAGE(swizzle, void* ctx) {
  2355. auto ir = r, ig = g, ib = b, ia = a;
  2356. F* o[] = {&r, &g, &b, &a};
  2357. char swiz[4];
  2358. memcpy(swiz, &ctx, sizeof(swiz));
  2359. for (int i = 0; i < 4; ++i) {
  2360. switch (swiz[i]) {
  2361. case 'r': *o[i] = ir; break;
  2362. case 'g': *o[i] = ig; break;
  2363. case 'b': *o[i] = ib; break;
  2364. case 'a': *o[i] = ia; break;
  2365. case '0': *o[i] = F(0); break;
  2366. case '1': *o[i] = F(1); break;
  2367. default: break;
  2368. }
  2369. }
  2370. }
  2371. namespace lowp {
  2372. #if defined(JUMPER_IS_SCALAR) || defined(SK_DISABLE_LOWP_RASTER_PIPELINE)
  2373. // If we're not compiled by Clang, or otherwise switched into scalar mode (old Clang, manually),
  2374. // we don't generate lowp stages. All these nullptrs will tell SkJumper.cpp to always use the
  2375. // highp float pipeline.
  2376. #define M(st) static void (*st)(void) = nullptr;
  2377. SK_RASTER_PIPELINE_STAGES(M)
  2378. #undef M
  2379. static void (*just_return)(void) = nullptr;
  2380. static void start_pipeline(size_t,size_t,size_t,size_t, void**) {}
  2381. #else // We are compiling vector code with Clang... let's make some lowp stages!
  2382. #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
  2383. using U8 = uint8_t __attribute__((ext_vector_type(16)));
  2384. using U16 = uint16_t __attribute__((ext_vector_type(16)));
  2385. using I16 = int16_t __attribute__((ext_vector_type(16)));
  2386. using I32 = int32_t __attribute__((ext_vector_type(16)));
  2387. using U32 = uint32_t __attribute__((ext_vector_type(16)));
  2388. using F = float __attribute__((ext_vector_type(16)));
  2389. #else
  2390. using U8 = uint8_t __attribute__((ext_vector_type(8)));
  2391. using U16 = uint16_t __attribute__((ext_vector_type(8)));
  2392. using I16 = int16_t __attribute__((ext_vector_type(8)));
  2393. using I32 = int32_t __attribute__((ext_vector_type(8)));
  2394. using U32 = uint32_t __attribute__((ext_vector_type(8)));
  2395. using F = float __attribute__((ext_vector_type(8)));
  2396. #endif
  2397. static const size_t N = sizeof(U16) / sizeof(uint16_t);
  2398. // Once again, some platforms benefit from a restricted Stage calling convention,
  2399. // but others can pass tons and tons of registers and we're happy to exploit that.
  2400. // It's exactly the same decision and implementation strategy as the F stages above.
  2401. #if JUMPER_NARROW_STAGES
  2402. struct Params {
  2403. size_t dx, dy, tail;
  2404. U16 dr,dg,db,da;
  2405. };
  2406. using Stage = void(ABI*)(Params*, void** program, U16 r, U16 g, U16 b, U16 a);
  2407. #else
  2408. // We pass program as the second argument so that load_and_inc() will find it in %rsi on x86-64.
  2409. using Stage = void (ABI*)(size_t tail, void** program, size_t dx, size_t dy,
  2410. U16 r, U16 g, U16 b, U16 a,
  2411. U16 dr, U16 dg, U16 db, U16 da);
  2412. #endif
  2413. static void start_pipeline(const size_t x0, const size_t y0,
  2414. const size_t xlimit, const size_t ylimit, void** program) {
  2415. auto start = (Stage)load_and_inc(program);
  2416. for (size_t dy = y0; dy < ylimit; dy++) {
  2417. #if JUMPER_NARROW_STAGES
  2418. Params params = { x0,dy,0, 0,0,0,0 };
  2419. for (; params.dx + N <= xlimit; params.dx += N) {
  2420. start(&params,program, 0,0,0,0);
  2421. }
  2422. if (size_t tail = xlimit - params.dx) {
  2423. params.tail = tail;
  2424. start(&params,program, 0,0,0,0);
  2425. }
  2426. #else
  2427. size_t dx = x0;
  2428. for (; dx + N <= xlimit; dx += N) {
  2429. start( 0,program,dx,dy, 0,0,0,0, 0,0,0,0);
  2430. }
  2431. if (size_t tail = xlimit - dx) {
  2432. start(tail,program,dx,dy, 0,0,0,0, 0,0,0,0);
  2433. }
  2434. #endif
  2435. }
  2436. }
  2437. #if JUMPER_NARROW_STAGES
  2438. static void ABI just_return(Params*, void**, U16,U16,U16,U16) {}
  2439. #else
  2440. static void ABI just_return(size_t,void**,size_t,size_t, U16,U16,U16,U16, U16,U16,U16,U16) {}
  2441. #endif
  2442. // All stages use the same function call ABI to chain into each other, but there are three types:
  2443. // GG: geometry in, geometry out -- think, a matrix
  2444. // GP: geometry in, pixels out. -- think, a memory gather
  2445. // PP: pixels in, pixels out. -- think, a blend mode
  2446. //
  2447. // (Some stages ignore their inputs or produce no logical output. That's perfectly fine.)
  2448. //
  2449. // These three STAGE_ macros let you define each type of stage,
  2450. // and will have (x,y) geometry and/or (r,g,b,a, dr,dg,db,da) pixel arguments as appropriate.
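// For example, force_opaque below is a PP stage:
//     STAGE_PP(force_opaque, Ctx::None) { a = 255; }
// and matrix_translate is a GG stage:
//     STAGE_GG(matrix_translate, const float* m) { x += m[0]; y += m[1]; }
// The macros generate the ABI wrapper that unpacks the arguments, calls the _k body,
// and chains to the next stage via load_and_inc().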
  2451. #if JUMPER_NARROW_STAGES
  2452. #define STAGE_GG(name, ...) \
  2453. SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F& x, F& y); \
  2454. static void ABI name(Params* params, void** program, U16 r, U16 g, U16 b, U16 a) { \
  2455. auto x = join<F>(r,g), \
  2456. y = join<F>(b,a); \
  2457. name##_k(Ctx{program}, params->dx,params->dy,params->tail, x,y); \
  2458. split(x, &r,&g); \
  2459. split(y, &b,&a); \
  2460. auto next = (Stage)load_and_inc(program); \
  2461. next(params,program, r,g,b,a); \
  2462. } \
  2463. SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F& x, F& y)
  2464. #define STAGE_GP(name, ...) \
  2465. SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F x, F y, \
  2466. U16& r, U16& g, U16& b, U16& a, \
  2467. U16& dr, U16& dg, U16& db, U16& da); \
  2468. static void ABI name(Params* params, void** program, U16 r, U16 g, U16 b, U16 a) { \
  2469. auto x = join<F>(r,g), \
  2470. y = join<F>(b,a); \
  2471. name##_k(Ctx{program}, params->dx,params->dy,params->tail, x,y, r,g,b,a, \
  2472. params->dr,params->dg,params->db,params->da); \
  2473. auto next = (Stage)load_and_inc(program); \
  2474. next(params,program, r,g,b,a); \
  2475. } \
  2476. SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F x, F y, \
  2477. U16& r, U16& g, U16& b, U16& a, \
  2478. U16& dr, U16& dg, U16& db, U16& da)
  2479. #define STAGE_PP(name, ...) \
  2480. SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, \
  2481. U16& r, U16& g, U16& b, U16& a, \
  2482. U16& dr, U16& dg, U16& db, U16& da); \
  2483. static void ABI name(Params* params, void** program, U16 r, U16 g, U16 b, U16 a) { \
  2484. name##_k(Ctx{program}, params->dx,params->dy,params->tail, r,g,b,a, \
  2485. params->dr,params->dg,params->db,params->da); \
  2486. auto next = (Stage)load_and_inc(program); \
  2487. next(params,program, r,g,b,a); \
  2488. } \
  2489. SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, \
  2490. U16& r, U16& g, U16& b, U16& a, \
  2491. U16& dr, U16& dg, U16& db, U16& da)
  2492. #else
  2493. #define STAGE_GG(name, ...) \
  2494. SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F& x, F& y); \
  2495. static void ABI name(size_t tail, void** program, size_t dx, size_t dy, \
  2496. U16 r, U16 g, U16 b, U16 a, \
  2497. U16 dr, U16 dg, U16 db, U16 da) { \
  2498. auto x = join<F>(r,g), \
  2499. y = join<F>(b,a); \
  2500. name##_k(Ctx{program}, dx,dy,tail, x,y); \
  2501. split(x, &r,&g); \
  2502. split(y, &b,&a); \
  2503. auto next = (Stage)load_and_inc(program); \
  2504. next(tail,program,dx,dy, r,g,b,a, dr,dg,db,da); \
  2505. } \
  2506. SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F& x, F& y)
  2507. #define STAGE_GP(name, ...) \
  2508. SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F x, F y, \
  2509. U16& r, U16& g, U16& b, U16& a, \
  2510. U16& dr, U16& dg, U16& db, U16& da); \
  2511. static void ABI name(size_t tail, void** program, size_t dx, size_t dy, \
  2512. U16 r, U16 g, U16 b, U16 a, \
  2513. U16 dr, U16 dg, U16 db, U16 da) { \
  2514. auto x = join<F>(r,g), \
  2515. y = join<F>(b,a); \
  2516. name##_k(Ctx{program}, dx,dy,tail, x,y, r,g,b,a, dr,dg,db,da); \
  2517. auto next = (Stage)load_and_inc(program); \
  2518. next(tail,program,dx,dy, r,g,b,a, dr,dg,db,da); \
  2519. } \
  2520. SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F x, F y, \
  2521. U16& r, U16& g, U16& b, U16& a, \
  2522. U16& dr, U16& dg, U16& db, U16& da)
  2523. #define STAGE_PP(name, ...) \
  2524. SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, \
  2525. U16& r, U16& g, U16& b, U16& a, \
  2526. U16& dr, U16& dg, U16& db, U16& da); \
  2527. static void ABI name(size_t tail, void** program, size_t dx, size_t dy, \
  2528. U16 r, U16 g, U16 b, U16 a, \
  2529. U16 dr, U16 dg, U16 db, U16 da) { \
  2530. name##_k(Ctx{program}, dx,dy,tail, r,g,b,a, dr,dg,db,da); \
  2531. auto next = (Stage)load_and_inc(program); \
  2532. next(tail,program,dx,dy, r,g,b,a, dr,dg,db,da); \
  2533. } \
  2534. SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, \
  2535. U16& r, U16& g, U16& b, U16& a, \
  2536. U16& dr, U16& dg, U16& db, U16& da)
  2537. #endif
  2538. // ~~~~~~ Commonly used helper functions ~~~~~~ //
SI U16 div255(U16 v) {
#if 0
    return (v+127)/255;  // The ideal rounding divide by 255.
#elif 1 && defined(JUMPER_IS_NEON)
    // With NEON we can compute (v+127)/255 as (v + ((v+128)>>8) + 128)>>8
    // just as fast as we can do the approximation below, so might as well be correct!
    // First we compute v + ((v+128)>>8), then one more round of (...+128)>>8 to finish up.
    return vrshrq_n_u16(vrsraq_n_u16(v, v, 8), 8);
#else
    return (v+255)/256;  // A good approximation of (v+127)/255.
#endif
}
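// Worked example (illustrative): at the top end, v = 255*255 = 65025 gives 255 from both
// the exact divide and the approximation, but small products can differ by one,
// e.g. v = 1: (1+255)/256 = 1 while (1+127)/255 = 0.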
  2551. SI U16 inv(U16 v) { return 255-v; }
  2552. SI U16 if_then_else(I16 c, U16 t, U16 e) { return (t & c) | (e & ~c); }
  2553. SI U32 if_then_else(I32 c, U32 t, U32 e) { return (t & c) | (e & ~c); }
  2554. SI U16 max(U16 x, U16 y) { return if_then_else(x < y, y, x); }
  2555. SI U16 min(U16 x, U16 y) { return if_then_else(x < y, x, y); }
  2556. SI U16 max(U16 x, U16 y, U16 z) { return max(x, max(y, z)); }
  2557. SI U16 min(U16 x, U16 y, U16 z) { return min(x, min(y, z)); }
  2558. SI U16 from_float(float f) { return f * 255.0f + 0.5f; }
  2559. SI U16 lerp(U16 from, U16 to, U16 t) { return div255( from*inv(t) + to*t ); }
  2560. template <typename D, typename S>
  2561. SI D cast(S src) {
  2562. return __builtin_convertvector(src, D);
  2563. }
  2564. template <typename D, typename S>
  2565. SI void split(S v, D* lo, D* hi) {
  2566. static_assert(2*sizeof(D) == sizeof(S), "");
  2567. memcpy(lo, (const char*)&v + 0*sizeof(D), sizeof(D));
  2568. memcpy(hi, (const char*)&v + 1*sizeof(D), sizeof(D));
  2569. }
  2570. template <typename D, typename S>
  2571. SI D join(S lo, S hi) {
  2572. static_assert(sizeof(D) == 2*sizeof(S), "");
  2573. D v;
  2574. memcpy((char*)&v + 0*sizeof(S), &lo, sizeof(S));
  2575. memcpy((char*)&v + 1*sizeof(S), &hi, sizeof(S));
  2576. return v;
  2577. }
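// split() and join() let us drop to native intrinsics on half-width vectors; rcp() and
// sqrt_() below, for instance, split an F into two halves, use SSE/NEON on each, then join.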
  2578. SI F if_then_else(I32 c, F t, F e) {
  2579. return bit_cast<F>( (bit_cast<I32>(t) & c) | (bit_cast<I32>(e) & ~c) );
  2580. }
  2581. SI F max(F x, F y) { return if_then_else(x < y, y, x); }
  2582. SI F min(F x, F y) { return if_then_else(x < y, x, y); }
  2583. SI F mad(F f, F m, F a) { return f*m+a; }
  2584. SI U32 trunc_(F x) { return (U32)cast<I32>(x); }
  2585. SI F rcp(F x) {
  2586. #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
  2587. __m256 lo,hi;
  2588. split(x, &lo,&hi);
  2589. return join<F>(_mm256_rcp_ps(lo), _mm256_rcp_ps(hi));
  2590. #elif defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
  2591. __m128 lo,hi;
  2592. split(x, &lo,&hi);
  2593. return join<F>(_mm_rcp_ps(lo), _mm_rcp_ps(hi));
  2594. #elif defined(JUMPER_IS_NEON)
  2595. auto rcp = [](float32x4_t v) {
  2596. auto est = vrecpeq_f32(v);
  2597. return vrecpsq_f32(v,est)*est;
  2598. };
  2599. float32x4_t lo,hi;
  2600. split(x, &lo,&hi);
  2601. return join<F>(rcp(lo), rcp(hi));
  2602. #else
  2603. return 1.0f / x;
  2604. #endif
  2605. }
  2606. SI F sqrt_(F x) {
  2607. #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
  2608. __m256 lo,hi;
  2609. split(x, &lo,&hi);
  2610. return join<F>(_mm256_sqrt_ps(lo), _mm256_sqrt_ps(hi));
  2611. #elif defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
  2612. __m128 lo,hi;
  2613. split(x, &lo,&hi);
  2614. return join<F>(_mm_sqrt_ps(lo), _mm_sqrt_ps(hi));
  2615. #elif defined(SK_CPU_ARM64)
  2616. float32x4_t lo,hi;
  2617. split(x, &lo,&hi);
  2618. return join<F>(vsqrtq_f32(lo), vsqrtq_f32(hi));
  2619. #elif defined(JUMPER_IS_NEON)
  2620. auto sqrt = [](float32x4_t v) {
  2621. auto est = vrsqrteq_f32(v); // Estimate and two refinement steps for est = rsqrt(v).
  2622. est *= vrsqrtsq_f32(v,est*est);
  2623. est *= vrsqrtsq_f32(v,est*est);
  2624. return v*est; // sqrt(v) == v*rsqrt(v).
  2625. };
  2626. float32x4_t lo,hi;
  2627. split(x, &lo,&hi);
  2628. return join<F>(sqrt(lo), sqrt(hi));
  2629. #else
  2630. return F{
  2631. sqrtf(x[0]), sqrtf(x[1]), sqrtf(x[2]), sqrtf(x[3]),
  2632. sqrtf(x[4]), sqrtf(x[5]), sqrtf(x[6]), sqrtf(x[7]),
  2633. };
  2634. #endif
  2635. }
  2636. SI F floor_(F x) {
  2637. #if defined(SK_CPU_ARM64)
  2638. float32x4_t lo,hi;
  2639. split(x, &lo,&hi);
  2640. return join<F>(vrndmq_f32(lo), vrndmq_f32(hi));
  2641. #elif defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
  2642. __m256 lo,hi;
  2643. split(x, &lo,&hi);
  2644. return join<F>(_mm256_floor_ps(lo), _mm256_floor_ps(hi));
  2645. #elif defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
  2646. __m128 lo,hi;
  2647. split(x, &lo,&hi);
  2648. return join<F>(_mm_floor_ps(lo), _mm_floor_ps(hi));
  2649. #else
  2650. F roundtrip = cast<F>(cast<I32>(x));
  2651. return roundtrip - if_then_else(roundtrip > x, F(1), F(0));
  2652. #endif
  2653. }
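// Worked example for the round-trip fallback above (illustrative): x = -1.5 truncates to -1,
// so roundtrip = -1.0 > x and we subtract 1, giving -2.0 == floor(-1.5); for x = +1.5 the
// round-trip value is already <= x and nothing is subtracted.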
  2654. SI F fract(F x) { return x - floor_(x); }
  2655. SI F abs_(F x) { return bit_cast<F>( bit_cast<I32>(x) & 0x7fffffff ); }
  2656. // ~~~~~~ Basic / misc. stages ~~~~~~ //
  2657. STAGE_GG(seed_shader, Ctx::None) {
  2658. static const float iota[] = {
  2659. 0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f,
  2660. 8.5f, 9.5f,10.5f,11.5f,12.5f,13.5f,14.5f,15.5f,
  2661. };
  2662. x = cast<F>(I32(dx)) + sk_unaligned_load<F>(iota);
  2663. y = cast<F>(I32(dy)) + 0.5f;
  2664. }
  2665. STAGE_GG(matrix_translate, const float* m) {
  2666. x += m[0];
  2667. y += m[1];
  2668. }
  2669. STAGE_GG(matrix_scale_translate, const float* m) {
  2670. x = mad(x,m[0], m[2]);
  2671. y = mad(y,m[1], m[3]);
  2672. }
  2673. STAGE_GG(matrix_2x3, const float* m) {
  2674. auto X = mad(x,m[0], mad(y,m[2], m[4])),
  2675. Y = mad(x,m[1], mad(y,m[3], m[5]));
  2676. x = X;
  2677. y = Y;
  2678. }
  2679. STAGE_GG(matrix_perspective, const float* m) {
  2680. // N.B. Unlike the other matrix_ stages, this matrix is row-major.
  2681. auto X = mad(x,m[0], mad(y,m[1], m[2])),
  2682. Y = mad(x,m[3], mad(y,m[4], m[5])),
  2683. Z = mad(x,m[6], mad(y,m[7], m[8]));
  2684. x = X * rcp(Z);
  2685. y = Y * rcp(Z);
  2686. }
  2687. STAGE_PP(uniform_color, const SkRasterPipeline_UniformColorCtx* c) {
  2688. r = c->rgba[0];
  2689. g = c->rgba[1];
  2690. b = c->rgba[2];
  2691. a = c->rgba[3];
  2692. }
  2693. STAGE_PP(black_color, Ctx::None) { r = g = b = 0; a = 255; }
  2694. STAGE_PP(white_color, Ctx::None) { r = g = b = 255; a = 255; }
  2695. STAGE_PP(set_rgb, const float rgb[3]) {
  2696. r = from_float(rgb[0]);
  2697. g = from_float(rgb[1]);
  2698. b = from_float(rgb[2]);
  2699. }
  2700. STAGE_PP(clamp_0, Ctx::None) { /*definitely a noop*/ }
  2701. STAGE_PP(clamp_1, Ctx::None) { /*_should_ be a noop*/ }
  2702. STAGE_PP(clamp_a, Ctx::None) {
  2703. r = min(r, a);
  2704. g = min(g, a);
  2705. b = min(b, a);
  2706. }
  2707. STAGE_PP(clamp_gamut, Ctx::None) {
  2708. // It shouldn't be possible to get out-of-gamut
  2709. // colors when working in lowp.
  2710. }
  2711. STAGE_PP(premul, Ctx::None) {
  2712. r = div255(r * a);
  2713. g = div255(g * a);
  2714. b = div255(b * a);
  2715. }
  2716. STAGE_PP(premul_dst, Ctx::None) {
  2717. dr = div255(dr * da);
  2718. dg = div255(dg * da);
  2719. db = div255(db * da);
  2720. }
  2721. STAGE_PP(force_opaque , Ctx::None) { a = 255; }
  2722. STAGE_PP(force_opaque_dst, Ctx::None) { da = 255; }
  2723. STAGE_PP(swap_rb, Ctx::None) {
  2724. auto tmp = r;
  2725. r = b;
  2726. b = tmp;
  2727. }
  2728. STAGE_PP(swap_rb_dst, Ctx::None) {
  2729. auto tmp = dr;
  2730. dr = db;
  2731. db = tmp;
  2732. }
  2733. STAGE_PP(move_src_dst, Ctx::None) {
  2734. dr = r;
  2735. dg = g;
  2736. db = b;
  2737. da = a;
  2738. }
  2739. STAGE_PP(move_dst_src, Ctx::None) {
  2740. r = dr;
  2741. g = dg;
  2742. b = db;
  2743. a = da;
  2744. }
  2745. // ~~~~~~ Blend modes ~~~~~~ //
  2746. // The same logic applied to all 4 channels.
  2747. #define BLEND_MODE(name) \
  2748. SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da); \
  2749. STAGE_PP(name, Ctx::None) { \
  2750. r = name##_channel(r,dr,a,da); \
  2751. g = name##_channel(g,dg,a,da); \
  2752. b = name##_channel(b,db,a,da); \
  2753. a = name##_channel(a,da,a,da); \
  2754. } \
  2755. SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da)
  2756. BLEND_MODE(clear) { return 0; }
  2757. BLEND_MODE(srcatop) { return div255( s*da + d*inv(sa) ); }
  2758. BLEND_MODE(dstatop) { return div255( d*sa + s*inv(da) ); }
  2759. BLEND_MODE(srcin) { return div255( s*da ); }
  2760. BLEND_MODE(dstin) { return div255( d*sa ); }
  2761. BLEND_MODE(srcout) { return div255( s*inv(da) ); }
  2762. BLEND_MODE(dstout) { return div255( d*inv(sa) ); }
  2763. BLEND_MODE(srcover) { return s + div255( d*inv(sa) ); }
  2764. BLEND_MODE(dstover) { return d + div255( s*inv(da) ); }
  2765. BLEND_MODE(modulate) { return div255( s*d ); }
  2766. BLEND_MODE(multiply) { return div255( s*inv(da) + d*inv(sa) + s*d ); }
  2767. BLEND_MODE(plus_) { return min(s+d, 255); }
  2768. BLEND_MODE(screen) { return s + d - div255( s*d ); }
  2769. BLEND_MODE(xor_) { return div255( s*inv(da) + d*inv(sa) ); }
  2770. #undef BLEND_MODE
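// Sanity check (illustrative): for an opaque source (a == 255), srcover is
// s + div255(d*inv(255)) == s, and for a fully transparent source it's 0 + div255(d*255) == d.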
  2771. // The same logic applied to color, and srcover for alpha.
  2772. #define BLEND_MODE(name) \
  2773. SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da); \
  2774. STAGE_PP(name, Ctx::None) { \
  2775. r = name##_channel(r,dr,a,da); \
  2776. g = name##_channel(g,dg,a,da); \
  2777. b = name##_channel(b,db,a,da); \
  2778. a = a + div255( da*inv(a) ); \
  2779. } \
  2780. SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da)
  2781. BLEND_MODE(darken) { return s + d - div255( max(s*da, d*sa) ); }
  2782. BLEND_MODE(lighten) { return s + d - div255( min(s*da, d*sa) ); }
  2783. BLEND_MODE(difference) { return s + d - 2*div255( min(s*da, d*sa) ); }
  2784. BLEND_MODE(exclusion) { return s + d - 2*div255( s*d ); }
  2785. BLEND_MODE(hardlight) {
  2786. return div255( s*inv(da) + d*inv(sa) +
  2787. if_then_else(2*s <= sa, 2*s*d, sa*da - 2*(sa-s)*(da-d)) );
  2788. }
  2789. BLEND_MODE(overlay) {
  2790. return div255( s*inv(da) + d*inv(sa) +
  2791. if_then_else(2*d <= da, 2*s*d, sa*da - 2*(sa-s)*(da-d)) );
  2792. }
  2793. #undef BLEND_MODE
  2794. // ~~~~~~ Helpers for interacting with memory ~~~~~~ //
  2795. template <typename T>
  2796. SI T* ptr_at_xy(const SkRasterPipeline_MemoryCtx* ctx, size_t dx, size_t dy) {
  2797. return (T*)ctx->pixels + dy*ctx->stride + dx;
  2798. }
template <typename T>
SI U32 ix_and_ptr(T** ptr, const SkRasterPipeline_GatherCtx* ctx, F x, F y) {
    auto clamp = [](F v, F limit) {
        limit = bit_cast<F>( bit_cast<U32>(limit) - 1 );  // Exclusive -> inclusive.
        return min(max(0, v), limit);
    };
    x = clamp(x, ctx->width);
    y = clamp(y, ctx->height);
    *ptr = (const T*)ctx->pixels;
    return trunc_(y)*ctx->stride + trunc_(x);
}
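// The bit_cast trick above turns the exclusive float limit into the largest float just
// below it, e.g. (illustrative) width = 8.0f clamps to ~7.9999995f, so trunc_() can
// never yield an out-of-bounds index of 8.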
  2810. template <typename V, typename T>
  2811. SI V load(const T* ptr, size_t tail) {
  2812. V v = 0;
  2813. switch (tail & (N-1)) {
  2814. case 0: memcpy(&v, ptr, sizeof(v)); break;
  2815. #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
  2816. case 15: v[14] = ptr[14];
  2817. case 14: v[13] = ptr[13];
  2818. case 13: v[12] = ptr[12];
  2819. case 12: memcpy(&v, ptr, 12*sizeof(T)); break;
  2820. case 11: v[10] = ptr[10];
  2821. case 10: v[ 9] = ptr[ 9];
  2822. case 9: v[ 8] = ptr[ 8];
  2823. case 8: memcpy(&v, ptr, 8*sizeof(T)); break;
  2824. #endif
  2825. case 7: v[ 6] = ptr[ 6];
  2826. case 6: v[ 5] = ptr[ 5];
  2827. case 5: v[ 4] = ptr[ 4];
  2828. case 4: memcpy(&v, ptr, 4*sizeof(T)); break;
  2829. case 3: v[ 2] = ptr[ 2];
  2830. case 2: memcpy(&v, ptr, 2*sizeof(T)); break;
  2831. case 1: v[ 0] = ptr[ 0];
  2832. }
  2833. return v;
  2834. }
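// (Both load() and store() lean on deliberate case fall-through: e.g. with tail == 3,
// case 3 handles element 2 lane-by-lane and then falls into case 2's memcpy of the first two.)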
  2835. template <typename V, typename T>
  2836. SI void store(T* ptr, size_t tail, V v) {
  2837. switch (tail & (N-1)) {
  2838. case 0: memcpy(ptr, &v, sizeof(v)); break;
  2839. #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
  2840. case 15: ptr[14] = v[14];
  2841. case 14: ptr[13] = v[13];
  2842. case 13: ptr[12] = v[12];
  2843. case 12: memcpy(ptr, &v, 12*sizeof(T)); break;
  2844. case 11: ptr[10] = v[10];
  2845. case 10: ptr[ 9] = v[ 9];
  2846. case 9: ptr[ 8] = v[ 8];
  2847. case 8: memcpy(ptr, &v, 8*sizeof(T)); break;
  2848. #endif
  2849. case 7: ptr[ 6] = v[ 6];
  2850. case 6: ptr[ 5] = v[ 5];
  2851. case 5: ptr[ 4] = v[ 4];
  2852. case 4: memcpy(ptr, &v, 4*sizeof(T)); break;
  2853. case 3: ptr[ 2] = v[ 2];
  2854. case 2: memcpy(ptr, &v, 2*sizeof(T)); break;
  2855. case 1: ptr[ 0] = v[ 0];
  2856. }
  2857. }
  2858. #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
  2859. template <typename V, typename T>
  2860. SI V gather(const T* ptr, U32 ix) {
  2861. return V{ ptr[ix[ 0]], ptr[ix[ 1]], ptr[ix[ 2]], ptr[ix[ 3]],
  2862. ptr[ix[ 4]], ptr[ix[ 5]], ptr[ix[ 6]], ptr[ix[ 7]],
  2863. ptr[ix[ 8]], ptr[ix[ 9]], ptr[ix[10]], ptr[ix[11]],
  2864. ptr[ix[12]], ptr[ix[13]], ptr[ix[14]], ptr[ix[15]], };
  2865. }
  2866. template<>
  2867. F gather(const float* ptr, U32 ix) {
  2868. __m256i lo, hi;
  2869. split(ix, &lo, &hi);
  2870. return join<F>(_mm256_i32gather_ps(ptr, lo, 4),
  2871. _mm256_i32gather_ps(ptr, hi, 4));
  2872. }
  2873. template<>
  2874. U32 gather(const uint32_t* ptr, U32 ix) {
  2875. __m256i lo, hi;
  2876. split(ix, &lo, &hi);
  2877. return join<U32>(_mm256_i32gather_epi32(ptr, lo, 4),
  2878. _mm256_i32gather_epi32(ptr, hi, 4));
  2879. }
  2880. #else
  2881. template <typename V, typename T>
  2882. SI V gather(const T* ptr, U32 ix) {
  2883. return V{ ptr[ix[ 0]], ptr[ix[ 1]], ptr[ix[ 2]], ptr[ix[ 3]],
  2884. ptr[ix[ 4]], ptr[ix[ 5]], ptr[ix[ 6]], ptr[ix[ 7]], };
  2885. }
  2886. #endif
  2887. // ~~~~~~ 32-bit memory loads and stores ~~~~~~ //
  2888. SI void from_8888(U32 rgba, U16* r, U16* g, U16* b, U16* a) {
#if 1 && (defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512))
  2890. // Swap the middle 128-bit lanes to make _mm256_packus_epi32() in cast_U16() work out nicely.
  2891. __m256i _01,_23;
  2892. split(rgba, &_01, &_23);
  2893. __m256i _02 = _mm256_permute2x128_si256(_01,_23, 0x20),
  2894. _13 = _mm256_permute2x128_si256(_01,_23, 0x31);
  2895. rgba = join<U32>(_02, _13);
  2896. auto cast_U16 = [](U32 v) -> U16 {
  2897. __m256i _02,_13;
  2898. split(v, &_02,&_13);
  2899. return _mm256_packus_epi32(_02,_13);
  2900. };
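    // (N.B. _mm256_packus_epi32() packs within 128-bit lanes, interleaving its two
    // arguments lane by lane; swapping the middle lanes above is what puts the 16 pixels
    // back in order after the pack.)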
  2901. #else
  2902. auto cast_U16 = [](U32 v) -> U16 {
  2903. return cast<U16>(v);
  2904. };
  2905. #endif
  2906. *r = cast_U16(rgba & 65535) & 255;
  2907. *g = cast_U16(rgba & 65535) >> 8;
  2908. *b = cast_U16(rgba >> 16) & 255;
  2909. *a = cast_U16(rgba >> 16) >> 8;
  2910. }
  2911. SI void load_8888_(const uint32_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
  2912. #if 1 && defined(JUMPER_IS_NEON)
  2913. uint8x8x4_t rgba;
  2914. switch (tail & (N-1)) {
  2915. case 0: rgba = vld4_u8 ((const uint8_t*)(ptr+0) ); break;
  2916. case 7: rgba = vld4_lane_u8((const uint8_t*)(ptr+6), rgba, 6);
  2917. case 6: rgba = vld4_lane_u8((const uint8_t*)(ptr+5), rgba, 5);
  2918. case 5: rgba = vld4_lane_u8((const uint8_t*)(ptr+4), rgba, 4);
  2919. case 4: rgba = vld4_lane_u8((const uint8_t*)(ptr+3), rgba, 3);
  2920. case 3: rgba = vld4_lane_u8((const uint8_t*)(ptr+2), rgba, 2);
  2921. case 2: rgba = vld4_lane_u8((const uint8_t*)(ptr+1), rgba, 1);
  2922. case 1: rgba = vld4_lane_u8((const uint8_t*)(ptr+0), rgba, 0);
  2923. }
  2924. *r = cast<U16>(rgba.val[0]);
  2925. *g = cast<U16>(rgba.val[1]);
  2926. *b = cast<U16>(rgba.val[2]);
  2927. *a = cast<U16>(rgba.val[3]);
  2928. #else
  2929. from_8888(load<U32>(ptr, tail), r,g,b,a);
  2930. #endif
  2931. }
  2932. SI void store_8888_(uint32_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
  2933. #if 1 && defined(JUMPER_IS_NEON)
  2934. uint8x8x4_t rgba = {{
  2935. cast<U8>(r),
  2936. cast<U8>(g),
  2937. cast<U8>(b),
  2938. cast<U8>(a),
  2939. }};
  2940. switch (tail & (N-1)) {
  2941. case 0: vst4_u8 ((uint8_t*)(ptr+0), rgba ); break;
  2942. case 7: vst4_lane_u8((uint8_t*)(ptr+6), rgba, 6);
  2943. case 6: vst4_lane_u8((uint8_t*)(ptr+5), rgba, 5);
  2944. case 5: vst4_lane_u8((uint8_t*)(ptr+4), rgba, 4);
  2945. case 4: vst4_lane_u8((uint8_t*)(ptr+3), rgba, 3);
  2946. case 3: vst4_lane_u8((uint8_t*)(ptr+2), rgba, 2);
  2947. case 2: vst4_lane_u8((uint8_t*)(ptr+1), rgba, 1);
  2948. case 1: vst4_lane_u8((uint8_t*)(ptr+0), rgba, 0);
  2949. }
  2950. #else
  2951. store(ptr, tail, cast<U32>(r | (g<<8)) << 0
  2952. | cast<U32>(b | (a<<8)) << 16);
  2953. #endif
  2954. }
  2955. STAGE_PP(load_8888, const SkRasterPipeline_MemoryCtx* ctx) {
  2956. load_8888_(ptr_at_xy<const uint32_t>(ctx, dx,dy), tail, &r,&g,&b,&a);
  2957. }
  2958. STAGE_PP(load_8888_dst, const SkRasterPipeline_MemoryCtx* ctx) {
  2959. load_8888_(ptr_at_xy<const uint32_t>(ctx, dx,dy), tail, &dr,&dg,&db,&da);
  2960. }
  2961. STAGE_PP(store_8888, const SkRasterPipeline_MemoryCtx* ctx) {
  2962. store_8888_(ptr_at_xy<uint32_t>(ctx, dx,dy), tail, r,g,b,a);
  2963. }
  2964. STAGE_GP(gather_8888, const SkRasterPipeline_GatherCtx* ctx) {
  2965. const uint32_t* ptr;
  2966. U32 ix = ix_and_ptr(&ptr, ctx, x,y);
  2967. from_8888(gather<U32>(ptr, ix), &r, &g, &b, &a);
  2968. }
  2969. // ~~~~~~ 16-bit memory loads and stores ~~~~~~ //
SI void from_565(U16 rgb, U16* r, U16* g, U16* b) {
    // Format for 565 buffers: 15|rrrrr gggggg bbbbb|0
    U16 R = (rgb >> 11) & 31,
        G = (rgb >>  5) & 63,
        B = (rgb >>  0) & 31;

    // These bit replications are the same as multiplying by 255/31 or 255/63 to scale to 8-bit.
    *r = (R << 3) | (R >> 2);
    *g = (G << 2) | (G >> 4);
    *b = (B << 3) | (B >> 2);
}
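// Worked example (illustrative): R = 31 -> (31<<3)|(31>>2) = 255, and R = 16 -> 128|4 = 132,
// matching round(16 * 255/31) = 132.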
  2980. SI void load_565_(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
  2981. from_565(load<U16>(ptr, tail), r,g,b);
  2982. }
SI void store_565_(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b) {
    // Round from [0,255] to [0,31] or [0,63], as if x * (31/255.0f) + 0.5f.
    // (Don't feel like you need to find some fundamental truth in these...
    // they were brute-force searched.)
    U16 R = (r *  9 + 36) / 74,   //  9/74 ≈ 31/255, plus 36/74, about half.
        G = (g * 21 + 42) / 85,   // 21/85 = 63/255 exactly.
        B = (b *  9 + 36) / 74;

    // Pack them back into 15|rrrrr gggggg bbbbb|0.
    store(ptr, tail, R << 11
                   | G <<  5
                   | B <<  0);
}
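// Spot check (illustrative): r = 255 -> (255*9 + 36)/74 = 31 and g = 255 -> (255*21 + 42)/85 = 63,
// so full intensity survives the round trip.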
  2995. STAGE_PP(load_565, const SkRasterPipeline_MemoryCtx* ctx) {
  2996. load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &r,&g,&b);
  2997. a = 255;
  2998. }
  2999. STAGE_PP(load_565_dst, const SkRasterPipeline_MemoryCtx* ctx) {
  3000. load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &dr,&dg,&db);
  3001. da = 255;
  3002. }
  3003. STAGE_PP(store_565, const SkRasterPipeline_MemoryCtx* ctx) {
  3004. store_565_(ptr_at_xy<uint16_t>(ctx, dx,dy), tail, r,g,b);
  3005. }
  3006. STAGE_GP(gather_565, const SkRasterPipeline_GatherCtx* ctx) {
  3007. const uint16_t* ptr;
  3008. U32 ix = ix_and_ptr(&ptr, ctx, x,y);
  3009. from_565(gather<U16>(ptr, ix), &r, &g, &b);
  3010. a = 255;
  3011. }
  3012. SI void from_4444(U16 rgba, U16* r, U16* g, U16* b, U16* a) {
  3013. // Format for 4444 buffers: 15|rrrr gggg bbbb aaaa|0.
  3014. U16 R = (rgba >> 12) & 15,
  3015. G = (rgba >> 8) & 15,
  3016. B = (rgba >> 4) & 15,
  3017. A = (rgba >> 0) & 15;
  3018. // Scale [0,15] to [0,255].
  3019. *r = (R << 4) | R;
  3020. *g = (G << 4) | G;
  3021. *b = (B << 4) | B;
  3022. *a = (A << 4) | A;
  3023. }
  3024. SI void load_4444_(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
  3025. from_4444(load<U16>(ptr, tail), r,g,b,a);
  3026. }
  3027. SI void store_4444_(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
  3028. // Round from [0,255] to [0,15], producing the same value as (x*(15/255.0f) + 0.5f).
  3029. U16 R = (r + 8) / 17,
  3030. G = (g + 8) / 17,
  3031. B = (b + 8) / 17,
  3032. A = (a + 8) / 17;
  3033. // Pack them back into 15|rrrr gggg bbbb aaaa|0.
  3034. store(ptr, tail, R << 12
  3035. | G << 8
  3036. | B << 4
  3037. | A << 0);
  3038. }
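// Spot check (illustrative): 255 -> (255+8)/17 = 15, 0 -> 0, and 128 -> 136/17 = 8, which agrees
// with trunc(128*(15/255.0f) + 0.5f).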
  3039. STAGE_PP(load_4444, const SkRasterPipeline_MemoryCtx* ctx) {
  3040. load_4444_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &r,&g,&b,&a);
  3041. }
  3042. STAGE_PP(load_4444_dst, const SkRasterPipeline_MemoryCtx* ctx) {
  3043. load_4444_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &dr,&dg,&db,&da);
  3044. }
  3045. STAGE_PP(store_4444, const SkRasterPipeline_MemoryCtx* ctx) {
  3046. store_4444_(ptr_at_xy<uint16_t>(ctx, dx,dy), tail, r,g,b,a);
  3047. }
  3048. STAGE_GP(gather_4444, const SkRasterPipeline_GatherCtx* ctx) {
  3049. const uint16_t* ptr;
  3050. U32 ix = ix_and_ptr(&ptr, ctx, x,y);
  3051. from_4444(gather<U16>(ptr, ix), &r,&g,&b,&a);
  3052. }
  3053. SI void from_88(U16 rg, U16* r, U16* g) {
  3054. *r = (rg & 0xFF);
  3055. *g = (rg >> 8);
  3056. }
  3057. SI void load_88_(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
  3058. #if 1 && defined(JUMPER_IS_NEON)
  3059. uint8x8x2_t rg;
  3060. switch (tail & (N-1)) {
  3061. case 0: rg = vld2_u8 ((const uint8_t*)(ptr+0) ); break;
  3062. case 7: rg = vld2_lane_u8((const uint8_t*)(ptr+6), rg, 6);
  3063. case 6: rg = vld2_lane_u8((const uint8_t*)(ptr+5), rg, 5);
  3064. case 5: rg = vld2_lane_u8((const uint8_t*)(ptr+4), rg, 4);
  3065. case 4: rg = vld2_lane_u8((const uint8_t*)(ptr+3), rg, 3);
  3066. case 3: rg = vld2_lane_u8((const uint8_t*)(ptr+2), rg, 2);
  3067. case 2: rg = vld2_lane_u8((const uint8_t*)(ptr+1), rg, 1);
  3068. case 1: rg = vld2_lane_u8((const uint8_t*)(ptr+0), rg, 0);
  3069. }
  3070. *r = cast<U16>(rg.val[0]);
  3071. *g = cast<U16>(rg.val[1]);
  3072. #else
  3073. from_88(load<U16>(ptr, tail), r,g);
  3074. #endif
  3075. }
  3076. SI void store_88_(uint16_t* ptr, size_t tail, U16 r, U16 g) {
  3077. #if 1 && defined(JUMPER_IS_NEON)
  3078. uint8x8x2_t rg = {{
  3079. cast<U8>(r),
  3080. cast<U8>(g),
  3081. }};
  3082. switch (tail & (N-1)) {
  3083. case 0: vst2_u8 ((uint8_t*)(ptr+0), rg ); break;
  3084. case 7: vst2_lane_u8((uint8_t*)(ptr+6), rg, 6);
  3085. case 6: vst2_lane_u8((uint8_t*)(ptr+5), rg, 5);
  3086. case 5: vst2_lane_u8((uint8_t*)(ptr+4), rg, 4);
  3087. case 4: vst2_lane_u8((uint8_t*)(ptr+3), rg, 3);
  3088. case 3: vst2_lane_u8((uint8_t*)(ptr+2), rg, 2);
  3089. case 2: vst2_lane_u8((uint8_t*)(ptr+1), rg, 1);
  3090. case 1: vst2_lane_u8((uint8_t*)(ptr+0), rg, 0);
  3091. }
  3092. #else
  3093. store(ptr, tail, cast<U16>(r | (g<<8)) << 0);
  3094. #endif
  3095. }
  3096. STAGE_PP(load_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
  3097. b = 0;
  3098. a = 255;
  3099. load_88_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &r,&g);
  3100. }
  3101. STAGE_PP(store_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
  3102. store_88_(ptr_at_xy<uint16_t>(ctx, dx, dy), tail, r, g);
  3103. }
  3104. // ~~~~~~ 8-bit memory loads and stores ~~~~~~ //
  3105. SI U16 load_8(const uint8_t* ptr, size_t tail) {
  3106. return cast<U16>(load<U8>(ptr, tail));
  3107. }
  3108. SI void store_8(uint8_t* ptr, size_t tail, U16 v) {
  3109. store(ptr, tail, cast<U8>(v));
  3110. }
  3111. STAGE_PP(load_a8, const SkRasterPipeline_MemoryCtx* ctx) {
  3112. r = g = b = 0;
  3113. a = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
  3114. }
  3115. STAGE_PP(load_a8_dst, const SkRasterPipeline_MemoryCtx* ctx) {
  3116. dr = dg = db = 0;
  3117. da = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
  3118. }
  3119. STAGE_PP(store_a8, const SkRasterPipeline_MemoryCtx* ctx) {
  3120. store_8(ptr_at_xy<uint8_t>(ctx, dx,dy), tail, a);
  3121. }
  3122. STAGE_GP(gather_a8, const SkRasterPipeline_GatherCtx* ctx) {
  3123. const uint8_t* ptr;
  3124. U32 ix = ix_and_ptr(&ptr, ctx, x,y);
  3125. r = g = b = 0;
  3126. a = cast<U16>(gather<U8>(ptr, ix));
  3127. }
  3128. STAGE_PP(alpha_to_gray, Ctx::None) {
  3129. r = g = b = a;
  3130. a = 255;
  3131. }
  3132. STAGE_PP(alpha_to_gray_dst, Ctx::None) {
  3133. dr = dg = db = da;
  3134. da = 255;
  3135. }
  3136. STAGE_PP(bt709_luminance_or_luma_to_alpha, Ctx::None) {
  3137. a = (r*54 + g*183 + b*19)/256; // 0.2126, 0.7152, 0.0722 with 256 denominator.
  3138. r = g = b = 0;
  3139. }
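// (The weights sum to 54 + 183 + 19 = 256, so pure white r=g=b=255 maps back to exactly 255.)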
// ~~~~~~ Stashing and restoring the src/dst registers ~~~~~~ //
  3141. STAGE_PP(load_src, const uint16_t* ptr) {
  3142. r = sk_unaligned_load<U16>(ptr + 0*N);
  3143. g = sk_unaligned_load<U16>(ptr + 1*N);
  3144. b = sk_unaligned_load<U16>(ptr + 2*N);
  3145. a = sk_unaligned_load<U16>(ptr + 3*N);
  3146. }
  3147. STAGE_PP(store_src, uint16_t* ptr) {
  3148. sk_unaligned_store(ptr + 0*N, r);
  3149. sk_unaligned_store(ptr + 1*N, g);
  3150. sk_unaligned_store(ptr + 2*N, b);
  3151. sk_unaligned_store(ptr + 3*N, a);
  3152. }
  3153. STAGE_PP(load_dst, const uint16_t* ptr) {
  3154. dr = sk_unaligned_load<U16>(ptr + 0*N);
  3155. dg = sk_unaligned_load<U16>(ptr + 1*N);
  3156. db = sk_unaligned_load<U16>(ptr + 2*N);
  3157. da = sk_unaligned_load<U16>(ptr + 3*N);
  3158. }
  3159. STAGE_PP(store_dst, uint16_t* ptr) {
  3160. sk_unaligned_store(ptr + 0*N, dr);
  3161. sk_unaligned_store(ptr + 1*N, dg);
  3162. sk_unaligned_store(ptr + 2*N, db);
  3163. sk_unaligned_store(ptr + 3*N, da);
  3164. }
  3165. // ~~~~~~ Coverage scales / lerps ~~~~~~ //
  3166. STAGE_PP(scale_1_float, const float* f) {
  3167. U16 c = from_float(*f);
  3168. r = div255( r * c );
  3169. g = div255( g * c );
  3170. b = div255( b * c );
  3171. a = div255( a * c );
  3172. }
  3173. STAGE_PP(lerp_1_float, const float* f) {
  3174. U16 c = from_float(*f);
  3175. r = lerp(dr, r, c);
  3176. g = lerp(dg, g, c);
  3177. b = lerp(db, b, c);
  3178. a = lerp(da, a, c);
  3179. }
  3180. STAGE_PP(lerp_native, const uint16_t scales[]) {
  3181. auto c = sk_unaligned_load<U16>(scales);
  3182. r = lerp(dr, r, c);
  3183. g = lerp(dg, g, c);
  3184. b = lerp(db, b, c);
  3185. a = lerp(da, a, c);
  3186. }
  3187. STAGE_PP(scale_u8, const SkRasterPipeline_MemoryCtx* ctx) {
  3188. U16 c = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
  3189. r = div255( r * c );
  3190. g = div255( g * c );
  3191. b = div255( b * c );
  3192. a = div255( a * c );
  3193. }
  3194. STAGE_PP(lerp_u8, const SkRasterPipeline_MemoryCtx* ctx) {
  3195. U16 c = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
  3196. r = lerp(dr, r, c);
  3197. g = lerp(dg, g, c);
  3198. b = lerp(db, b, c);
  3199. a = lerp(da, a, c);
  3200. }
  3201. // Derive alpha's coverage from rgb coverage and the values of src and dst alpha.
  3202. SI U16 alpha_coverage_from_rgb_coverage(U16 a, U16 da, U16 cr, U16 cg, U16 cb) {
  3203. return if_then_else(a < da, min(cr,cg,cb)
  3204. , max(cr,cg,cb));
  3205. }
  3206. STAGE_PP(scale_565, const SkRasterPipeline_MemoryCtx* ctx) {
  3207. U16 cr,cg,cb;
  3208. load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &cr,&cg,&cb);
  3209. U16 ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
  3210. r = div255( r * cr );
  3211. g = div255( g * cg );
  3212. b = div255( b * cb );
  3213. a = div255( a * ca );
  3214. }
  3215. STAGE_PP(lerp_565, const SkRasterPipeline_MemoryCtx* ctx) {
  3216. U16 cr,cg,cb;
  3217. load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &cr,&cg,&cb);
  3218. U16 ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
  3219. r = lerp(dr, r, cr);
  3220. g = lerp(dg, g, cg);
  3221. b = lerp(db, b, cb);
  3222. a = lerp(da, a, ca);
  3223. }
  3224. STAGE_PP(emboss, const SkRasterPipeline_EmbossCtx* ctx) {
  3225. U16 mul = load_8(ptr_at_xy<const uint8_t>(&ctx->mul, dx,dy), tail),
  3226. add = load_8(ptr_at_xy<const uint8_t>(&ctx->add, dx,dy), tail);
  3227. r = min(div255(r*mul) + add, a);
  3228. g = min(div255(g*mul) + add, a);
  3229. b = min(div255(b*mul) + add, a);
  3230. }
  3231. // ~~~~~~ Gradient stages ~~~~~~ //
  3232. // Clamp x to [0,1], both sides inclusive (think, gradients).
  3233. // Even repeat and mirror funnel through a clamp to handle bad inputs like +Inf, NaN.
  3234. SI F clamp_01(F v) { return min(max(0, v), 1); }
  3235. STAGE_GG(clamp_x_1 , Ctx::None) { x = clamp_01(x); }
  3236. STAGE_GG(repeat_x_1, Ctx::None) { x = clamp_01(x - floor_(x)); }
  3237. STAGE_GG(mirror_x_1, Ctx::None) {
  3238. auto two = [](F x){ return x+x; };
  3239. x = clamp_01(abs_( (x-1.0f) - two(floor_((x-1.0f)*0.5f)) - 1.0f ));
  3240. }
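// Worked example (illustrative): x = 1.25 -> |(0.25) - 2*floor(0.125) - 1| = 0.75, and
// x = 2.5 -> |(1.5) - 2*floor(0.75) - 1| = 0.5, i.e. mirror tiling with period 2.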
  3241. SI I16 cond_to_mask_16(I32 cond) { return cast<I16>(cond); }
  3242. STAGE_GG(decal_x, SkRasterPipeline_DecalTileCtx* ctx) {
  3243. auto w = ctx->limit_x;
  3244. sk_unaligned_store(ctx->mask, cond_to_mask_16((0 <= x) & (x < w)));
  3245. }
  3246. STAGE_GG(decal_y, SkRasterPipeline_DecalTileCtx* ctx) {
  3247. auto h = ctx->limit_y;
  3248. sk_unaligned_store(ctx->mask, cond_to_mask_16((0 <= y) & (y < h)));
  3249. }
  3250. STAGE_GG(decal_x_and_y, SkRasterPipeline_DecalTileCtx* ctx) {
  3251. auto w = ctx->limit_x;
  3252. auto h = ctx->limit_y;
  3253. sk_unaligned_store(ctx->mask, cond_to_mask_16((0 <= x) & (x < w) & (0 <= y) & (y < h)));
  3254. }
  3255. STAGE_PP(check_decal_mask, SkRasterPipeline_DecalTileCtx* ctx) {
  3256. auto mask = sk_unaligned_load<U16>(ctx->mask);
  3257. r = r & mask;
  3258. g = g & mask;
  3259. b = b & mask;
  3260. a = a & mask;
  3261. }
  3262. SI void round_F_to_U16(F R, F G, F B, F A, bool interpolatedInPremul,
  3263. U16* r, U16* g, U16* b, U16* a) {
  3264. auto round = [](F x) { return cast<U16>(x * 255.0f + 0.5f); };
  3265. F limit = interpolatedInPremul ? A
  3266. : 1;
  3267. *r = round(min(max(0,R), limit));
  3268. *g = round(min(max(0,G), limit));
  3269. *b = round(min(max(0,B), limit));
  3270. *a = round(A); // we assume alpha is already in [0,1].
  3271. }
  3272. SI void gradient_lookup(const SkRasterPipeline_GradientCtx* c, U32 idx, F t,
  3273. U16* r, U16* g, U16* b, U16* a) {
  3274. F fr, fg, fb, fa, br, bg, bb, ba;
  3275. #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
  3276. if (c->stopCount <=8) {
  3277. __m256i lo, hi;
  3278. split(idx, &lo, &hi);
  3279. fr = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[0]), lo),
  3280. _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[0]), hi));
  3281. br = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[0]), lo),
  3282. _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[0]), hi));
  3283. fg = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[1]), lo),
  3284. _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[1]), hi));
  3285. bg = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[1]), lo),
  3286. _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[1]), hi));
  3287. fb = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[2]), lo),
  3288. _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[2]), hi));
  3289. bb = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[2]), lo),
  3290. _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[2]), hi));
  3291. fa = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[3]), lo),
  3292. _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[3]), hi));
  3293. ba = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[3]), lo),
  3294. _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[3]), hi));
  3295. } else
  3296. #endif
  3297. {
  3298. fr = gather<F>(c->fs[0], idx);
  3299. fg = gather<F>(c->fs[1], idx);
  3300. fb = gather<F>(c->fs[2], idx);
  3301. fa = gather<F>(c->fs[3], idx);
  3302. br = gather<F>(c->bs[0], idx);
  3303. bg = gather<F>(c->bs[1], idx);
  3304. bb = gather<F>(c->bs[2], idx);
  3305. ba = gather<F>(c->bs[3], idx);
  3306. }
  3307. round_F_to_U16(mad(t, fr, br),
  3308. mad(t, fg, bg),
  3309. mad(t, fb, bb),
  3310. mad(t, fa, ba),
  3311. c->interpolatedInPremul,
  3312. r,g,b,a);
  3313. }
  3314. STAGE_GP(gradient, const SkRasterPipeline_GradientCtx* c) {
  3315. auto t = x;
  3316. U32 idx = 0;
  3317. // N.B. The loop starts at 1 because idx 0 is the color to use before the first stop.
  3318. for (size_t i = 1; i < c->stopCount; i++) {
  3319. idx += if_then_else(t >= c->ts[i], U32(1), U32(0));
  3320. }
  3321. gradient_lookup(c, idx, t, &r, &g, &b, &a);
  3322. }
  3323. STAGE_GP(evenly_spaced_gradient, const SkRasterPipeline_GradientCtx* c) {
  3324. auto t = x;
  3325. auto idx = trunc_(t * (c->stopCount-1));
  3326. gradient_lookup(c, idx, t, &r, &g, &b, &a);
  3327. }
  3328. STAGE_GP(evenly_spaced_2_stop_gradient, const SkRasterPipeline_EvenlySpaced2StopGradientCtx* c) {
  3329. auto t = x;
  3330. round_F_to_U16(mad(t, c->f[0], c->b[0]),
  3331. mad(t, c->f[1], c->b[1]),
  3332. mad(t, c->f[2], c->b[2]),
  3333. mad(t, c->f[3], c->b[3]),
  3334. c->interpolatedInPremul,
  3335. &r,&g,&b,&a);
  3336. }
  3337. STAGE_GG(xy_to_unit_angle, Ctx::None) {
  3338. F xabs = abs_(x),
  3339. yabs = abs_(y);
  3340. F slope = min(xabs, yabs)/max(xabs, yabs);
  3341. F s = slope * slope;
  3342. // Use a 7th degree polynomial to approximate atan.
  3343. // This was generated using sollya.gforge.inria.fr.
  3344. // A float optimized polynomial was generated using the following command.
  3345. // P1 = fpminimax((1/(2*Pi))*atan(x),[|1,3,5,7|],[|24...|],[2^(-40),1],relative);
  3346. F phi = slope
  3347. * (0.15912117063999176025390625f + s
  3348. * (-5.185396969318389892578125e-2f + s
  3349. * (2.476101927459239959716796875e-2f + s
  3350. * (-7.0547382347285747528076171875e-3f))));
  3351. phi = if_then_else(xabs < yabs, 1.0f/4.0f - phi, phi);
  3352. phi = if_then_else(x < 0.0f , 1.0f/2.0f - phi, phi);
  3353. phi = if_then_else(y < 0.0f , 1.0f - phi , phi);
  3354. phi = if_then_else(phi != phi , 0 , phi); // Check for NaN.
  3355. x = phi;
  3356. }
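// Spot check (illustrative): at (x,y) = (1,1) slope = 1 and the polynomial sums to ~0.125,
// i.e. 45 degrees expressed as a fraction of a full turn.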
  3357. STAGE_GG(xy_to_radius, Ctx::None) {
  3358. x = sqrt_(x*x + y*y);
  3359. }
  3360. // ~~~~~~ Compound stages ~~~~~~ //
  3361. STAGE_PP(srcover_rgba_8888, const SkRasterPipeline_MemoryCtx* ctx) {
  3362. auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
  3363. load_8888_(ptr, tail, &dr,&dg,&db,&da);
  3364. r = r + div255( dr*inv(a) );
  3365. g = g + div255( dg*inv(a) );
  3366. b = b + div255( db*inv(a) );
  3367. a = a + div255( da*inv(a) );
  3368. store_8888_(ptr, tail, r,g,b,a);
  3369. }
  3370. #if defined(SK_DISABLE_LOWP_BILERP_CLAMP_CLAMP_STAGE)
  3371. static void(*bilerp_clamp_8888)(void) = nullptr;
  3372. #else
  3373. STAGE_GP(bilerp_clamp_8888, const SkRasterPipeline_GatherCtx* ctx) {
  3374. // (cx,cy) are the center of our sample.
  3375. F cx = x,
  3376. cy = y;
  3377. // All sample points are at the same fractional offset (fx,fy).
  3378. // They're the 4 corners of a logical 1x1 pixel surrounding (x,y) at (0.5,0.5) offsets.
  3379. F fx = fract(cx + 0.5f),
  3380. fy = fract(cy + 0.5f);
  3381. // We'll accumulate the color of all four samples into {r,g,b,a} directly.
  3382. r = g = b = a = 0;
  3383. // The first three sample points will calculate their area using math
  3384. // just like in the float code above, but the fourth will take up all the rest.
  3385. //
  3386. // Logically this is the same as doing the math for the fourth pixel too,
  3387. // but rounding error makes this a better strategy, keeping opaque opaque, etc.
  3388. //
  3389. // We can keep up to 8 bits of fractional precision without overflowing 16-bit,
  3390. // so our "1.0" area is 256.
  3391. const uint16_t bias = 256;
  3392. U16 remaining = bias;
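    // (With sr,sg,sb,sa <= 255 and the areas summing to bias = 256, each accumulator tops
    // out at 255*256 = 65280, which still fits in 16 bits.)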
  3393. for (float dy = -0.5f; dy <= +0.5f; dy += 1.0f)
  3394. for (float dx = -0.5f; dx <= +0.5f; dx += 1.0f) {
  3395. // (x,y) are the coordinates of this sample point.
  3396. F x = cx + dx,
  3397. y = cy + dy;
  3398. // ix_and_ptr() will clamp to the image's bounds for us.
  3399. const uint32_t* ptr;
  3400. U32 ix = ix_and_ptr(&ptr, ctx, x,y);
  3401. U16 sr,sg,sb,sa;
  3402. from_8888(gather<U32>(ptr, ix), &sr,&sg,&sb,&sa);
  3403. // In bilinear interpolation, the 4 pixels at +/- 0.5 offsets from the sample pixel center
  3404. // are combined in direct proportion to their area overlapping that logical query pixel.
  3405. // At positive offsets, the x-axis contribution to that rectangle is fx,
  3406. // or (1-fx) at negative x. Same deal for y.
  3407. F sx = (dx > 0) ? fx : 1.0f - fx,
  3408. sy = (dy > 0) ? fy : 1.0f - fy;
  3409. U16 area = (dy == 0.5f && dx == 0.5f) ? remaining
  3410. : cast<U16>(sx * sy * bias);
  3411. for (size_t i = 0; i < N; i++) {
  3412. SkASSERT(remaining[i] >= area[i]);
  3413. }
  3414. remaining -= area;
  3415. r += sr * area;
  3416. g += sg * area;
  3417. b += sb * area;
  3418. a += sa * area;
  3419. }
  3420. r = (r + bias/2) / bias;
  3421. g = (g + bias/2) / bias;
  3422. b = (b + bias/2) / bias;
  3423. a = (a + bias/2) / bias;
  3424. }
  3425. #endif
  3426. // ~~~~~~ GrSwizzle stage ~~~~~~ //
  3427. STAGE_PP(swizzle, void* ctx) {
  3428. auto ir = r, ig = g, ib = b, ia = a;
  3429. U16* o[] = {&r, &g, &b, &a};
  3430. char swiz[4];
  3431. memcpy(swiz, &ctx, sizeof(swiz));
  3432. for (int i = 0; i < 4; ++i) {
  3433. switch (swiz[i]) {
  3434. case 'r': *o[i] = ir; break;
  3435. case 'g': *o[i] = ig; break;
  3436. case 'b': *o[i] = ib; break;
  3437. case 'a': *o[i] = ia; break;
  3438. case '0': *o[i] = U16(0); break;
  3439. case '1': *o[i] = U16(255); break;
  3440. default: break;
  3441. }
  3442. }
  3443. }
  3444. // Now we'll add null stand-ins for stages we haven't implemented in lowp.
// If a pipeline uses any of these stages, it will be booted out of lowp into the highp float pipeline.
  3446. #define NOT_IMPLEMENTED(st) static void (*st)(void) = nullptr;
  3447. NOT_IMPLEMENTED(callback)
  3448. NOT_IMPLEMENTED(interpreter)
  3449. NOT_IMPLEMENTED(unbounded_set_rgb)
  3450. NOT_IMPLEMENTED(unbounded_uniform_color)
  3451. NOT_IMPLEMENTED(unpremul)
  3452. NOT_IMPLEMENTED(dither) // TODO
  3453. NOT_IMPLEMENTED(from_srgb)
  3454. NOT_IMPLEMENTED(to_srgb)
  3455. NOT_IMPLEMENTED(load_16161616)
  3456. NOT_IMPLEMENTED(store_16161616)
  3457. NOT_IMPLEMENTED(load_a16)
  3458. NOT_IMPLEMENTED(store_a16)
  3459. NOT_IMPLEMENTED(load_rg1616)
  3460. NOT_IMPLEMENTED(store_rg1616)
  3461. NOT_IMPLEMENTED(load_f16)
  3462. NOT_IMPLEMENTED(load_f16_dst)
  3463. NOT_IMPLEMENTED(store_f16)
  3464. NOT_IMPLEMENTED(gather_f16)
  3465. NOT_IMPLEMENTED(load_af16)
  3466. NOT_IMPLEMENTED(store_af16)
  3467. NOT_IMPLEMENTED(load_rgf16)
  3468. NOT_IMPLEMENTED(store_rgf16)
  3469. NOT_IMPLEMENTED(load_f32)
  3470. NOT_IMPLEMENTED(load_f32_dst)
  3471. NOT_IMPLEMENTED(store_f32)
  3472. NOT_IMPLEMENTED(gather_f32)
  3473. NOT_IMPLEMENTED(load_rgf32)
  3474. NOT_IMPLEMENTED(store_rgf32)
  3475. NOT_IMPLEMENTED(load_1010102)
  3476. NOT_IMPLEMENTED(load_1010102_dst)
  3477. NOT_IMPLEMENTED(store_1010102)
  3478. NOT_IMPLEMENTED(gather_1010102)
  3479. NOT_IMPLEMENTED(store_u16_be)
  3480. NOT_IMPLEMENTED(byte_tables) // TODO
  3481. NOT_IMPLEMENTED(colorburn)
  3482. NOT_IMPLEMENTED(colordodge)
  3483. NOT_IMPLEMENTED(softlight)
  3484. NOT_IMPLEMENTED(hue)
  3485. NOT_IMPLEMENTED(saturation)
  3486. NOT_IMPLEMENTED(color)
  3487. NOT_IMPLEMENTED(luminosity)
  3488. NOT_IMPLEMENTED(matrix_3x3)
  3489. NOT_IMPLEMENTED(matrix_3x4)
  3490. NOT_IMPLEMENTED(matrix_4x5) // TODO
  3491. NOT_IMPLEMENTED(matrix_4x3) // TODO
  3492. NOT_IMPLEMENTED(parametric)
  3493. NOT_IMPLEMENTED(gamma_)
  3494. NOT_IMPLEMENTED(rgb_to_hsl)
  3495. NOT_IMPLEMENTED(hsl_to_rgb)
  3496. NOT_IMPLEMENTED(gauss_a_to_rgba) // TODO
  3497. NOT_IMPLEMENTED(mirror_x) // TODO
  3498. NOT_IMPLEMENTED(repeat_x) // TODO
  3499. NOT_IMPLEMENTED(mirror_y) // TODO
  3500. NOT_IMPLEMENTED(repeat_y) // TODO
  3501. NOT_IMPLEMENTED(negate_x)
  3502. NOT_IMPLEMENTED(bilinear_nx) // TODO
  3503. NOT_IMPLEMENTED(bilinear_ny) // TODO
  3504. NOT_IMPLEMENTED(bilinear_px) // TODO
  3505. NOT_IMPLEMENTED(bilinear_py) // TODO
  3506. NOT_IMPLEMENTED(bicubic_n3x) // TODO
  3507. NOT_IMPLEMENTED(bicubic_n1x) // TODO
  3508. NOT_IMPLEMENTED(bicubic_p1x) // TODO
  3509. NOT_IMPLEMENTED(bicubic_p3x) // TODO
  3510. NOT_IMPLEMENTED(bicubic_n3y) // TODO
  3511. NOT_IMPLEMENTED(bicubic_n1y) // TODO
  3512. NOT_IMPLEMENTED(bicubic_p1y) // TODO
  3513. NOT_IMPLEMENTED(bicubic_p3y) // TODO
  3514. NOT_IMPLEMENTED(save_xy) // TODO
  3515. NOT_IMPLEMENTED(accumulate) // TODO
  3516. NOT_IMPLEMENTED(xy_to_2pt_conical_well_behaved)
  3517. NOT_IMPLEMENTED(xy_to_2pt_conical_strip)
  3518. NOT_IMPLEMENTED(xy_to_2pt_conical_focal_on_circle)
  3519. NOT_IMPLEMENTED(xy_to_2pt_conical_smaller)
  3520. NOT_IMPLEMENTED(xy_to_2pt_conical_greater)
  3521. NOT_IMPLEMENTED(alter_2pt_conical_compensate_focal)
  3522. NOT_IMPLEMENTED(alter_2pt_conical_unswap)
  3523. NOT_IMPLEMENTED(mask_2pt_conical_nan)
  3524. NOT_IMPLEMENTED(mask_2pt_conical_degenerates)
  3525. NOT_IMPLEMENTED(apply_vector_mask)
  3526. #undef NOT_IMPLEMENTED
  3527. #endif//defined(JUMPER_IS_SCALAR) controlling whether we build lowp stages
  3528. } // namespace lowp
  3529. } // namespace SK_OPTS_NS
  3530. #endif//SkRasterPipeline_opts_DEFINED