/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkNx_neon_DEFINED
#define SkNx_neon_DEFINED

#include <arm_neon.h>

namespace {  // NOLINT(google-build-namespaces)
// ARMv8 has vrndm(q)_f32 to floor floats.  Here we emulate it:
//   - roundtrip through integers via truncation
//   - subtract 1 if that's too big (possible for negative values).
// This restricts the domain of our inputs to a maximum somewhere around 2^31.  Seems plenty big.
AI static float32x4_t emulate_vrndmq_f32(float32x4_t v) {
    auto roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v));
    auto too_big = vcgtq_f32(roundtrip, v);
    return vsubq_f32(roundtrip, (float32x4_t)vandq_u32(too_big, (uint32x4_t)vdupq_n_f32(1)));
}
AI static float32x2_t emulate_vrndm_f32(float32x2_t v) {
    auto roundtrip = vcvt_f32_s32(vcvt_s32_f32(v));
    auto too_big = vcgt_f32(roundtrip, v);
    return vsub_f32(roundtrip, (float32x2_t)vand_u32(too_big, (uint32x2_t)vdup_n_f32(1)));
}
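
// Worked example of the emulation above (illustrative values, not from the original source):
//   v = -1.25: truncation round-trips to -1.0, which is > -1.25, so subtract 1 -> floor = -2.0
//   v =  1.75: truncation round-trips to  1.0, which is not > 1.75, so nothing is subtracted -> floor = 1.0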

template <>
class SkNx<2, float> {
public:
    AI SkNx(float32x2_t vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(float val) : fVec(vdup_n_f32(val)) {}
    AI SkNx(float a, float b) { fVec = (float32x2_t) { a, b }; }

    AI static SkNx Load(const void* ptr) { return vld1_f32((const float*)ptr); }
    AI void store(void* ptr) const { vst1_f32((float*)ptr, fVec); }

    AI static void Load2(const void* ptr, SkNx* x, SkNx* y) {
        float32x2x2_t xy = vld2_f32((const float*) ptr);
        *x = xy.val[0];
        *y = xy.val[1];
    }

    AI static void Store2(void* dst, const SkNx& a, const SkNx& b) {
        float32x2x2_t ab = {{
            a.fVec,
            b.fVec,
        }};
        vst2_f32((float*) dst, ab);
    }

    AI static void Store3(void* dst, const SkNx& a, const SkNx& b, const SkNx& c) {
        float32x2x3_t abc = {{
            a.fVec,
            b.fVec,
            c.fVec,
        }};
        vst3_f32((float*) dst, abc);
    }

    AI static void Store4(void* dst, const SkNx& a, const SkNx& b, const SkNx& c, const SkNx& d) {
        float32x2x4_t abcd = {{
            a.fVec,
            b.fVec,
            c.fVec,
            d.fVec,
        }};
        vst4_f32((float*) dst, abcd);
    }
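
    // Note: vrecpe_f32 gives a rough reciprocal estimate and each vrecps_f32 step is one
    // Newton-Raphson refinement (it returns 2 - est*x, which the following multiply folds in).
    // invert() does a single refinement; the non-ARM64 paths of operator/ and sqrt() below
    // (and their 4-lane counterparts) do two for extra precision.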
    AI SkNx invert() const {
        float32x2_t est0 = vrecpe_f32(fVec),
                    est1 = vmul_f32(vrecps_f32(est0, fVec), est0);
        return est1;
    }

    AI SkNx operator - () const { return vneg_f32(fVec); }

    AI SkNx operator + (const SkNx& o) const { return vadd_f32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsub_f32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmul_f32(fVec, o.fVec); }
    AI SkNx operator / (const SkNx& o) const {
    #if defined(SK_CPU_ARM64)
        return vdiv_f32(fVec, o.fVec);
    #else
        float32x2_t est0 = vrecpe_f32(o.fVec),
                    est1 = vmul_f32(vrecps_f32(est0, o.fVec), est0),
                    est2 = vmul_f32(vrecps_f32(est1, o.fVec), est1);
        return vmul_f32(fVec, est2);
    #endif
    }

    AI SkNx operator==(const SkNx& o) const { return vreinterpret_f32_u32(vceq_f32(fVec, o.fVec)); }
    AI SkNx operator <(const SkNx& o) const { return vreinterpret_f32_u32(vclt_f32(fVec, o.fVec)); }
    AI SkNx operator >(const SkNx& o) const { return vreinterpret_f32_u32(vcgt_f32(fVec, o.fVec)); }
    AI SkNx operator<=(const SkNx& o) const { return vreinterpret_f32_u32(vcle_f32(fVec, o.fVec)); }
    AI SkNx operator>=(const SkNx& o) const { return vreinterpret_f32_u32(vcge_f32(fVec, o.fVec)); }
    AI SkNx operator!=(const SkNx& o) const {
        return vreinterpret_f32_u32(vmvn_u32(vceq_f32(fVec, o.fVec)));
    }

    AI static SkNx Min(const SkNx& l, const SkNx& r) { return vmin_f32(l.fVec, r.fVec); }
    AI static SkNx Max(const SkNx& l, const SkNx& r) { return vmax_f32(l.fVec, r.fVec); }

    AI SkNx abs() const { return vabs_f32(fVec); }

    AI SkNx floor() const {
    #if defined(SK_CPU_ARM64)
        return vrndm_f32(fVec);
    #else
        return emulate_vrndm_f32(fVec);
    #endif
    }

    AI SkNx rsqrt() const {
        float32x2_t est0 = vrsqrte_f32(fVec);
        return vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0);
    }

    AI SkNx sqrt() const {
    #if defined(SK_CPU_ARM64)
        return vsqrt_f32(fVec);
    #else
        float32x2_t est0 = vrsqrte_f32(fVec),
                    est1 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0),
                    est2 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est1, est1)), est1);
        return vmul_f32(fVec, est2);
    #endif
    }

    AI float operator[](int k) const {
        SkASSERT(0 <= k && k < 2);
        union { float32x2_t v; float fs[2]; } pun = {fVec};
        return pun.fs[k&1];
    }

    AI bool allTrue() const {
    #if defined(SK_CPU_ARM64)
        return 0 != vminv_u32(vreinterpret_u32_f32(fVec));
    #else
        auto v = vreinterpret_u32_f32(fVec);
        return vget_lane_u32(v,0) && vget_lane_u32(v,1);
    #endif
    }

    AI bool anyTrue() const {
    #if defined(SK_CPU_ARM64)
        return 0 != vmaxv_u32(vreinterpret_u32_f32(fVec));
    #else
        auto v = vreinterpret_u32_f32(fVec);
        return vget_lane_u32(v,0) || vget_lane_u32(v,1);
    #endif
    }
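
    // vbsl_f32 is a bitwise select: each result bit comes from t where the corresponding mask
    // bit is set and from e otherwise, so this expects fVec to hold the all-ones/all-zeros
    // lanes produced by the comparison operators above.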
    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbsl_f32(vreinterpret_u32_f32(fVec), t.fVec, e.fVec);
    }

    float32x2_t fVec;
};

template <>
class SkNx<4, float> {
public:
    AI SkNx(float32x4_t vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(float val) : fVec(vdupq_n_f32(val)) {}
    AI SkNx(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; }

    AI static SkNx Load(const void* ptr) { return vld1q_f32((const float*)ptr); }
    AI void store(void* ptr) const { vst1q_f32((float*)ptr, fVec); }

    AI static void Load2(const void* ptr, SkNx* x, SkNx* y) {
        float32x4x2_t xy = vld2q_f32((const float*) ptr);
        *x = xy.val[0];
        *y = xy.val[1];
    }

    AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
        float32x4x4_t rgba = vld4q_f32((const float*) ptr);
        *r = rgba.val[0];
        *g = rgba.val[1];
        *b = rgba.val[2];
        *a = rgba.val[3];
    }
    AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
        float32x4x4_t rgba = {{
            r.fVec,
            g.fVec,
            b.fVec,
            a.fVec,
        }};
        vst4q_f32((float*) dst, rgba);
    }

    AI SkNx invert() const {
        float32x4_t est0 = vrecpeq_f32(fVec),
                    est1 = vmulq_f32(vrecpsq_f32(est0, fVec), est0);
        return est1;
    }

    AI SkNx operator - () const { return vnegq_f32(fVec); }

    AI SkNx operator + (const SkNx& o) const { return vaddq_f32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_f32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmulq_f32(fVec, o.fVec); }
    AI SkNx operator / (const SkNx& o) const {
    #if defined(SK_CPU_ARM64)
        return vdivq_f32(fVec, o.fVec);
    #else
        float32x4_t est0 = vrecpeq_f32(o.fVec),
                    est1 = vmulq_f32(vrecpsq_f32(est0, o.fVec), est0),
                    est2 = vmulq_f32(vrecpsq_f32(est1, o.fVec), est1);
        return vmulq_f32(fVec, est2);
    #endif
    }

    AI SkNx operator==(const SkNx& o) const {return vreinterpretq_f32_u32(vceqq_f32(fVec, o.fVec));}
    AI SkNx operator <(const SkNx& o) const {return vreinterpretq_f32_u32(vcltq_f32(fVec, o.fVec));}
    AI SkNx operator >(const SkNx& o) const {return vreinterpretq_f32_u32(vcgtq_f32(fVec, o.fVec));}
    AI SkNx operator<=(const SkNx& o) const {return vreinterpretq_f32_u32(vcleq_f32(fVec, o.fVec));}
    AI SkNx operator>=(const SkNx& o) const {return vreinterpretq_f32_u32(vcgeq_f32(fVec, o.fVec));}
    AI SkNx operator!=(const SkNx& o) const {
        return vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec)));
    }

    AI static SkNx Min(const SkNx& l, const SkNx& r) { return vminq_f32(l.fVec, r.fVec); }
    AI static SkNx Max(const SkNx& l, const SkNx& r) { return vmaxq_f32(l.fVec, r.fVec); }

    AI SkNx abs() const { return vabsq_f32(fVec); }

    AI SkNx floor() const {
    #if defined(SK_CPU_ARM64)
        return vrndmq_f32(fVec);
    #else
        return emulate_vrndmq_f32(fVec);
    #endif
    }

    AI SkNx rsqrt() const {
        float32x4_t est0 = vrsqrteq_f32(fVec);
        return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0);
    }

    AI SkNx sqrt() const {
    #if defined(SK_CPU_ARM64)
        return vsqrtq_f32(fVec);
    #else
        float32x4_t est0 = vrsqrteq_f32(fVec),
                    est1 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0),
                    est2 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1);
        return vmulq_f32(fVec, est2);
    #endif
    }

    AI float operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { float32x4_t v; float fs[4]; } pun = {fVec};
        return pun.fs[k&3];
    }

    AI float min() const {
    #if defined(SK_CPU_ARM64)
        return vminvq_f32(fVec);
    #else
        SkNx min = Min(*this, vrev64q_f32(fVec));
        return SkTMin(min[0], min[2]);
    #endif
    }

    AI float max() const {
    #if defined(SK_CPU_ARM64)
        return vmaxvq_f32(fVec);
    #else
        SkNx max = Max(*this, vrev64q_f32(fVec));
        return SkTMax(max[0], max[2]);
    #endif
    }

    AI bool allTrue() const {
    #if defined(SK_CPU_ARM64)
        return 0 != vminvq_u32(vreinterpretq_u32_f32(fVec));
    #else
        auto v = vreinterpretq_u32_f32(fVec);
        return vgetq_lane_u32(v,0) && vgetq_lane_u32(v,1)
            && vgetq_lane_u32(v,2) && vgetq_lane_u32(v,3);
    #endif
    }

    AI bool anyTrue() const {
    #if defined(SK_CPU_ARM64)
        return 0 != vmaxvq_u32(vreinterpretq_u32_f32(fVec));
    #else
        auto v = vreinterpretq_u32_f32(fVec);
        return vgetq_lane_u32(v,0) || vgetq_lane_u32(v,1)
            || vgetq_lane_u32(v,2) || vgetq_lane_u32(v,3);
    #endif
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_f32(vreinterpretq_u32_f32(fVec), t.fVec, e.fVec);
    }

    float32x4_t fVec;
};
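
// ARM64 only: vfmaq_f32 computes a + f*m as a single fused multiply-add (one rounding step).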
#if defined(SK_CPU_ARM64)
AI static Sk4f SkNx_fma(const Sk4f& f, const Sk4f& m, const Sk4f& a) {
    return vfmaq_f32(a.fVec, f.fVec, m.fVec);
}
#endif

// It's possible that for our current use cases, representing this as
// half a uint16x8_t might be better than representing it as a uint16x4_t.
// It'd make conversion to Sk4b one step simpler.
template <>
class SkNx<4, uint16_t> {
public:
    AI SkNx(const uint16x4_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint16_t val) : fVec(vdup_n_u16(val)) {}
    AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {
        fVec = (uint16x4_t) { a,b,c,d };
    }

    AI static SkNx Load(const void* ptr) { return vld1_u16((const uint16_t*)ptr); }
    AI void store(void* ptr) const { vst1_u16((uint16_t*)ptr, fVec); }

    AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
        uint16x4x4_t rgba = vld4_u16((const uint16_t*)ptr);
        *r = rgba.val[0];
        *g = rgba.val[1];
        *b = rgba.val[2];
        *a = rgba.val[3];
    }
    AI static void Load3(const void* ptr, SkNx* r, SkNx* g, SkNx* b) {
        uint16x4x3_t rgba = vld3_u16((const uint16_t*)ptr);
        *r = rgba.val[0];
        *g = rgba.val[1];
        *b = rgba.val[2];
    }
    AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
        uint16x4x4_t rgba = {{
            r.fVec,
            g.fVec,
            b.fVec,
            a.fVec,
        }};
        vst4_u16((uint16_t*) dst, rgba);
    }

    AI SkNx operator + (const SkNx& o) const { return vadd_u16(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsub_u16(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmul_u16(fVec, o.fVec); }
    AI SkNx operator & (const SkNx& o) const { return vand_u16(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return vorr_u16(fVec, o.fVec); }
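
    // The shifts below use the GCC/Clang vector-extension operators on the NEON types directly
    // rather than vshl/vshr intrinsics; the other integer specializations follow the same pattern.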
    AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
    AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vmin_u16(a.fVec, b.fVec); }

    AI uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { uint16x4_t v; uint16_t us[4]; } pun = {fVec};
        return pun.us[k&3];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbsl_u16(fVec, t.fVec, e.fVec);
    }

    uint16x4_t fVec;
};

template <>
class SkNx<8, uint16_t> {
public:
    AI SkNx(const uint16x8_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint16_t val) : fVec(vdupq_n_u16(val)) {}
    AI static SkNx Load(const void* ptr) { return vld1q_u16((const uint16_t*)ptr); }

    AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
            uint16_t e, uint16_t f, uint16_t g, uint16_t h) {
        fVec = (uint16x8_t) { a,b,c,d, e,f,g,h };
    }

    AI void store(void* ptr) const { vst1q_u16((uint16_t*)ptr, fVec); }

    AI SkNx operator + (const SkNx& o) const { return vaddq_u16(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_u16(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmulq_u16(fVec, o.fVec); }
    AI SkNx operator & (const SkNx& o) const { return vandq_u16(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return vorrq_u16(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
    AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u16(a.fVec, b.fVec); }

    AI uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 8);
        union { uint16x8_t v; uint16_t us[8]; } pun = {fVec};
        return pun.us[k&7];
    }
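
    // mulHi: widen each half with vmull_u16 to full 32-bit products, then vshrn_n_u32 keeps
    // the top 16 bits of every product, i.e. (a*b) >> 16 per lane.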
    AI SkNx mulHi(const SkNx& m) const {
        uint32x4_t hi = vmull_u16(vget_high_u16(fVec), vget_high_u16(m.fVec));
        uint32x4_t lo = vmull_u16( vget_low_u16(fVec),  vget_low_u16(m.fVec));
        return { vcombine_u16(vshrn_n_u32(lo,16), vshrn_n_u32(hi,16)) };
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_u16(fVec, t.fVec, e.fVec);
    }

    uint16x8_t fVec;
};

template <>
class SkNx<4, uint8_t> {
public:
    typedef uint32_t __attribute__((aligned(1))) unaligned_uint32_t;

    AI SkNx(const uint8x8_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d) {
        fVec = (uint8x8_t){a,b,c,d, 0,0,0,0};
    }
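
    // Load()/store() move exactly 4 bytes: vld1_dup_u32 reads one (possibly unaligned) 32-bit
    // word into both lanes, which we reinterpret as 8 bytes with only the low 4 meaningful;
    // vst1_lane_u32 writes those 4 bytes back out.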
    AI static SkNx Load(const void* ptr) {
        return (uint8x8_t)vld1_dup_u32((const unaligned_uint32_t*)ptr);
    }
    AI void store(void* ptr) const {
        return vst1_lane_u32((unaligned_uint32_t*)ptr, (uint32x2_t)fVec, 0);
    }
    AI uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { uint8x8_t v; uint8_t us[8]; } pun = {fVec};
        return pun.us[k&3];
    }

    // TODO as needed

    uint8x8_t fVec;
};

template <>
class SkNx<8, uint8_t> {
public:
    AI SkNx(const uint8x8_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint8_t val) : fVec(vdup_n_u8(val)) {}
    AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
            uint8_t e, uint8_t f, uint8_t g, uint8_t h) {
        fVec = (uint8x8_t) { a,b,c,d, e,f,g,h };
    }

    AI static SkNx Load(const void* ptr) { return vld1_u8((const uint8_t*)ptr); }
    AI void store(void* ptr) const { vst1_u8((uint8_t*)ptr, fVec); }

    AI uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 8);
        union { uint8x8_t v; uint8_t us[8]; } pun = {fVec};
        return pun.us[k&7];
    }

    uint8x8_t fVec;
};

template <>
class SkNx<16, uint8_t> {
public:
    AI SkNx(const uint8x16_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint8_t val) : fVec(vdupq_n_u8(val)) {}
    AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
            uint8_t e, uint8_t f, uint8_t g, uint8_t h,
            uint8_t i, uint8_t j, uint8_t k, uint8_t l,
            uint8_t m, uint8_t n, uint8_t o, uint8_t p) {
        fVec = (uint8x16_t) { a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p };
    }

    AI static SkNx Load(const void* ptr) { return vld1q_u8((const uint8_t*)ptr); }
    AI void store(void* ptr) const { vst1q_u8((uint8_t*)ptr, fVec); }

    AI SkNx saturatedAdd(const SkNx& o) const { return vqaddq_u8(fVec, o.fVec); }

    AI SkNx operator + (const SkNx& o) const { return vaddq_u8(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_u8(fVec, o.fVec); }
    AI SkNx operator & (const SkNx& o) const { return vandq_u8(fVec, o.fVec); }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u8(a.fVec, b.fVec); }
    AI SkNx operator < (const SkNx& o) const { return vcltq_u8(fVec, o.fVec); }

    AI uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 16);
        union { uint8x16_t v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&15];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_u8(fVec, t.fVec, e.fVec);
    }

    uint8x16_t fVec;
};

template <>
class SkNx<4, int32_t> {
public:
    AI SkNx(const int32x4_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(int32_t v) {
        fVec = vdupq_n_s32(v);
    }
    AI SkNx(int32_t a, int32_t b, int32_t c, int32_t d) {
        fVec = (int32x4_t){a,b,c,d};
    }
    AI static SkNx Load(const void* ptr) {
        return vld1q_s32((const int32_t*)ptr);
    }
    AI void store(void* ptr) const {
        return vst1q_s32((int32_t*)ptr, fVec);
    }
    AI int32_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { int32x4_t v; int32_t is[4]; } pun = {fVec};
        return pun.is[k&3];
    }

    AI SkNx operator + (const SkNx& o) const { return vaddq_s32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_s32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmulq_s32(fVec, o.fVec); }

    AI SkNx operator & (const SkNx& o) const { return vandq_s32(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return vorrq_s32(fVec, o.fVec); }
    AI SkNx operator ^ (const SkNx& o) const { return veorq_s32(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
    AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }

    AI SkNx operator == (const SkNx& o) const {
        return vreinterpretq_s32_u32(vceqq_s32(fVec, o.fVec));
    }
    AI SkNx operator < (const SkNx& o) const {
        return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec));
    }
    AI SkNx operator > (const SkNx& o) const {
        return vreinterpretq_s32_u32(vcgtq_s32(fVec, o.fVec));
    }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.fVec); }
    AI static SkNx Max(const SkNx& a, const SkNx& b) { return vmaxq_s32(a.fVec, b.fVec); }
    // TODO as needed

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_s32(vreinterpretq_u32_s32(fVec), t.fVec, e.fVec);
    }

    AI SkNx abs() const { return vabsq_s32(fVec); }

    int32x4_t fVec;
};

template <>
class SkNx<4, uint32_t> {
public:
    AI SkNx(const uint32x4_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint32_t v) {
        fVec = vdupq_n_u32(v);
    }
    AI SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
        fVec = (uint32x4_t){a,b,c,d};
    }
    AI static SkNx Load(const void* ptr) {
        return vld1q_u32((const uint32_t*)ptr);
    }
    AI void store(void* ptr) const {
        return vst1q_u32((uint32_t*)ptr, fVec);
    }
    AI uint32_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { uint32x4_t v; uint32_t us[4]; } pun = {fVec};
        return pun.us[k&3];
    }

    AI SkNx operator + (const SkNx& o) const { return vaddq_u32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_u32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmulq_u32(fVec, o.fVec); }

    AI SkNx operator & (const SkNx& o) const { return vandq_u32(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return vorrq_u32(fVec, o.fVec); }
    AI SkNx operator ^ (const SkNx& o) const { return veorq_u32(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
    AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }

    AI SkNx operator == (const SkNx& o) const { return vceqq_u32(fVec, o.fVec); }
    AI SkNx operator < (const SkNx& o) const { return vcltq_u32(fVec, o.fVec); }
    AI SkNx operator > (const SkNx& o) const { return vcgtq_u32(fVec, o.fVec); }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u32(a.fVec, b.fVec); }
    // TODO as needed
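
    // mulHi: same idea as the 16-bit version, widening 32x32 -> 64 with vmull_u32 and
    // narrowing back with vshrn_n_u64 to keep the high 32 bits of each product.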
    AI SkNx mulHi(const SkNx& m) const {
        uint64x2_t hi = vmull_u32(vget_high_u32(fVec), vget_high_u32(m.fVec));
        uint64x2_t lo = vmull_u32( vget_low_u32(fVec),  vget_low_u32(m.fVec));
        return { vcombine_u32(vshrn_n_u64(lo,32), vshrn_n_u64(hi,32)) };
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_u32(fVec, t.fVec, e.fVec);
    }

    uint32x4_t fVec;
};

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, float>(const Sk4f& src) {
    return vcvtq_s32_f32(src.fVec);
}
template<> AI /*static*/ Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) {
    return vcvtq_f32_s32(src.fVec);
}
template<> AI /*static*/ Sk4f SkNx_cast<float, uint32_t>(const Sk4u& src) {
    return SkNx_cast<float>(Sk4i::Load(&src));
}

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
    return vqmovn_u32(vcvtq_u32_f32(src.fVec));
}

template<> AI /*static*/ Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
    return vcvtq_f32_u32(vmovl_u16(src.fVec));
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
    uint32x4_t _32 = vcvtq_u32_f32(src.fVec);
    uint16x4_t _16 = vqmovn_u32(_32);
    return vqmovn_u16(vcombine_u16(_16, _16));
}

template<> AI /*static*/ Sk4u SkNx_cast<uint32_t, uint8_t>(const Sk4b& src) {
    uint16x8_t _16 = vmovl_u8(src.fVec);
    return vmovl_u16(vget_low_u16(_16));
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint8_t>(const Sk4b& src) {
    return vreinterpretq_s32_u32(SkNx_cast<uint32_t>(src).fVec);
}

template<> AI /*static*/ Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
    return vcvtq_f32_s32(SkNx_cast<int32_t>(src).fVec);
}
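
// The cast below converts each float to a uint32, then relies on little-endian lane order:
// two rounds of vuzpq_u8 (taking .val[0] each time) gather byte 0 of every 32-bit lane,
// i.e. the low byte of each converted value, into a single uint8x16_t.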
template<> AI /*static*/ Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
    Sk8f ab, cd;
    SkNx_split(src, &ab, &cd);

    Sk4f a,b,c,d;
    SkNx_split(ab, &a, &b);
    SkNx_split(cd, &c, &d);
    return vuzpq_u8(vuzpq_u8((uint8x16_t)vcvtq_u32_f32(a.fVec),
                             (uint8x16_t)vcvtq_u32_f32(b.fVec)).val[0],
                    vuzpq_u8((uint8x16_t)vcvtq_u32_f32(c.fVec),
                             (uint8x16_t)vcvtq_u32_f32(d.fVec)).val[0]).val[0];
}

template<> AI /*static*/ Sk8b SkNx_cast<uint8_t, int32_t>(const Sk8i& src) {
    Sk4i a, b;
    SkNx_split(src, &a, &b);
    uint16x4_t a16 = vqmovun_s32(a.fVec);
    uint16x4_t b16 = vqmovun_s32(b.fVec);
    return vqmovn_u16(vcombine_u16(a16, b16));
}

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
    return vget_low_u16(vmovl_u8(src.fVec));
}

template<> AI /*static*/ Sk8h SkNx_cast<uint16_t, uint8_t>(const Sk8b& src) {
    return vmovl_u8(src.fVec);
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
    return vmovn_u16(vcombine_u16(src.fVec, src.fVec));
}

template<> AI /*static*/ Sk8b SkNx_cast<uint8_t, uint16_t>(const Sk8h& src) {
    return vqmovn_u16(src.fVec);
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) {
    uint16x4_t _16 = vqmovun_s32(src.fVec);
    return vqmovn_u16(vcombine_u16(_16, _16));
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint32_t>(const Sk4u& src) {
    uint16x4_t _16 = vqmovn_u32(src.fVec);
    return vqmovn_u16(vcombine_u16(_16, _16));
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint16_t>(const Sk4h& src) {
    return vreinterpretq_s32_u32(vmovl_u16(src.fVec));
}

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, int32_t>(const Sk4i& src) {
    return vmovn_u32(vreinterpretq_u32_s32(src.fVec));
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint32_t>(const Sk4u& src) {
    return vreinterpretq_s32_u32(src.fVec);
}
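
// Rounds by adding 0.5 and truncating toward zero; note this matches round-to-nearest
// only for non-negative inputs.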
AI static Sk4i Sk4f_round(const Sk4f& x) {
    return vcvtq_s32_f32((x + 0.5f).fVec);
}

}  // namespace

#endif//SkNx_neon_DEFINED