GrFragmentProcessor.h

/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrFragmentProcessor_DEFINED
#define GrFragmentProcessor_DEFINED

#include "src/gpu/GrProcessor.h"
#include "src/gpu/ops/GrOp.h"

class GrCoordTransform;
class GrGLSLFragmentProcessor;
class GrPaint;
class GrPipeline;
class GrProcessorKeyBuilder;
class GrShaderCaps;
class GrSwizzle;

/** Provides custom fragment shader code. Fragment processors receive an input color (half4) and
    produce an output color. They may reference textures and uniforms. They may use
    GrCoordTransforms to receive a transformation of the local coordinates that map from local space
    to the fragment being processed.
 */
class GrFragmentProcessor : public GrProcessor {
public:
    class TextureSampler;

    /**
     * In many instances (e.g. SkShader::asFragmentProcessor() implementations) it is desirable to
     * only consider the input color's alpha. However, there is a competing desire to have reusable
     * GrFragmentProcessor subclasses that can be used in other scenarios where the entire input
     * color is considered. This function exists to filter the input color and pass it to a FP. It
     * does so by returning a parent FP that multiplies the passed in FP's output by the parent's
     * input alpha. The passed in FP will not receive an input color.
     */
    static std::unique_ptr<GrFragmentProcessor> MulChildByInputAlpha(
            std::unique_ptr<GrFragmentProcessor> child);
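
    // A minimal usage sketch. `makeMyEffect()` is a hypothetical factory standing in for any
    // code that builds a child processor (e.g. an SkShader::asFragmentProcessor() impl):
    //
    //     std::unique_ptr<GrFragmentProcessor> child = makeMyEffect();
    //     std::unique_ptr<GrFragmentProcessor> fp =
    //             GrFragmentProcessor::MulChildByInputAlpha(std::move(child));
    //     // fp's output = child's output * input.a; the child itself sees no input color.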

    /**
     * Like MulChildByInputAlpha(), but reverses the sense of src and dst. In this case, return
     * the input modulated by the child's alpha. The passed in FP will not receive an input color.
     *
     * output = input * child.a
     */
    static std::unique_ptr<GrFragmentProcessor> MulInputByChildAlpha(
            std::unique_ptr<GrFragmentProcessor> child);

    /**
     * This assumes that the input color to the returned processor will be unpremul and that the
     * passed processor (which becomes the returned processor's child) produces a premul output.
     * The result of the returned processor is a premul of its input color modulated by the child
     * processor's premul output.
     */
    static std::unique_ptr<GrFragmentProcessor> MakeInputPremulAndMulByOutput(
            std::unique_ptr<GrFragmentProcessor>);

    /**
     * Returns a parent fragment processor that adopts the passed fragment processor as a child.
     * The parent will ignore its input color and instead feed the passed in color as input to the
     * child.
     */
    static std::unique_ptr<GrFragmentProcessor> OverrideInput(std::unique_ptr<GrFragmentProcessor>,
                                                              const SkPMColor4f&,
                                                              bool useUniform = true);
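
    // Usage sketch, assuming `child` was built elsewhere and SK_PMColor4fWHITE from
    // SkColorData.h:
    //
    //     std::unique_ptr<GrFragmentProcessor> fp = GrFragmentProcessor::OverrideInput(
    //             std::move(child), SK_PMColor4fWHITE, /*useUniform=*/false);
    //     // The child now always sees opaque white as its input, regardless of what the
    //     // pipeline feeds the returned parent.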

    /**
     * Returns a fragment processor that premuls the input before calling the passed in fragment
     * processor.
     */
    static std::unique_ptr<GrFragmentProcessor> PremulInput(std::unique_ptr<GrFragmentProcessor>);

    /**
     * Returns a fragment processor that calls the passed in fragment processor, and then swizzles
     * the output.
     */
    static std::unique_ptr<GrFragmentProcessor> SwizzleOutput(std::unique_ptr<GrFragmentProcessor>,
                                                              const GrSwizzle&);

    /**
     * Returns a fragment processor that runs the passed in array of fragment processors in a
     * series. The original input is passed to the first, the first's output is passed to the
     * second, etc. The output of the returned processor is the output of the last processor of the
     * series.
     *
     * The array elements will be moved.
     */
    static std::unique_ptr<GrFragmentProcessor> RunInSeries(std::unique_ptr<GrFragmentProcessor>*,
                                                            int cnt);
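
    // Usage sketch chaining two already-built processors. `fpA` and `fpB` are placeholders:
    //
    //     std::unique_ptr<GrFragmentProcessor> series[] = { std::move(fpA), std::move(fpB) };
    //     std::unique_ptr<GrFragmentProcessor> fp =
    //             GrFragmentProcessor::RunInSeries(series, SK_ARRAY_COUNT(series));
    //     // fpA sees the original input, fpB sees fpA's output, and fp outputs fpB's result.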

    /**
     * Makes a copy of this fragment processor that draws equivalently to the original.
     * If the processor has child processors they are cloned as well.
     */
    virtual std::unique_ptr<GrFragmentProcessor> clone() const = 0;

    GrGLSLFragmentProcessor* createGLSLInstance() const;

    void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const {
        this->onGetGLSLProcessorKey(caps, b);
        for (int i = 0; i < fChildProcessors.count(); ++i) {
            fChildProcessors[i]->getGLSLProcessorKey(caps, b);
        }
    }

    int numTextureSamplers() const { return fTextureSamplerCnt; }
    const TextureSampler& textureSampler(int i) const;

    int numCoordTransforms() const { return fCoordTransforms.count(); }

    /** Returns the coordinate transformation at index. index must be valid according to
        numCoordTransforms(). */
    const GrCoordTransform& coordTransform(int index) const { return *fCoordTransforms[index]; }

    const SkTArray<const GrCoordTransform*, true>& coordTransforms() const {
        return fCoordTransforms;
    }

    int numChildProcessors() const { return fChildProcessors.count(); }

    const GrFragmentProcessor& childProcessor(int index) const { return *fChildProcessors[index]; }

    SkDEBUGCODE(bool isInstantiated() const;)

    /** Do any of the coord transforms for this processor require local coords? */
    bool usesLocalCoords() const { return SkToBool(fFlags & kUsesLocalCoords_Flag); }

    /**
     * A GrDrawOp may premultiply its antialiasing coverage into its GrGeometryProcessor's color
     * output under the following scenario:
     *   * all the color fragment processors report true to this query,
     *   * all the coverage fragment processors report true to this query,
     *   * the blend mode arithmetic allows for it.
     * To be compatible a fragment processor's output must be a modulation of its input color or
     * alpha with a computed premultiplied color or alpha that is in the 0..1 range. The computed
     * color or alpha that is modulated against the input cannot depend on the input's alpha. The
     * computed value cannot depend on the input's color channels unless it unpremultiplies the
     * input color channels by the input alpha.
     */
    bool compatibleWithCoverageAsAlpha() const {
        return SkToBool(fFlags & kCompatibleWithCoverageAsAlpha_OptimizationFlag);
    }

    /**
     * If this is true then all opaque input colors to the processor produce opaque output colors.
     */
    bool preservesOpaqueInput() const {
        return SkToBool(fFlags & kPreservesOpaqueInput_OptimizationFlag);
    }

    /**
     * Tests whether, given a constant input color, the processor produces a constant output color
     * (for all fragments). If true, outputColor will contain the constant color produced for
     * inputColor.
     */
    bool hasConstantOutputForConstantInput(SkPMColor4f inputColor, SkPMColor4f* outputColor) const {
        if (fFlags & kConstantOutputForConstantInput_OptimizationFlag) {
            *outputColor = this->constantOutputForConstantInput(inputColor);
            return true;
        }
        return false;
    }

    bool hasConstantOutputForConstantInput() const {
        return SkToBool(fFlags & kConstantOutputForConstantInput_OptimizationFlag);
    }
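
    // Usage sketch: constant-folding a known input color through a processor. `fp` is a
    // placeholder pointer; SK_PMColor4fWHITE is assumed from SkColorData.h:
    //
    //     SkPMColor4f out;
    //     if (fp->hasConstantOutputForConstantInput(SK_PMColor4fWHITE, &out)) {
    //         // Every fragment would produce `out`; callers may fold this into the paint color.
    //     }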

    /** Returns true if this and the other processor conservatively draw identically. It can only
        return true when the two processors are of the same subclass (i.e. they return the same
        object from getFactory()).

        A return value of true from isEqual() should not be used to test whether the processor would
        generate the same shader code. To test for identical code generation use
        getGLSLProcessorKey().
     */
    bool isEqual(const GrFragmentProcessor& that) const;

    /**
     * Pre-order traversal of a FP hierarchy, or of the forest of FPs in a GrPipeline. In the latter
     * case the tree rooted at each FP in the GrPipeline is visited successively.
     */
    class Iter : public SkNoncopyable {
    public:
        explicit Iter(const GrFragmentProcessor* fp) { fFPStack.push_back(fp); }
        explicit Iter(const GrPipeline& pipeline);
        explicit Iter(const GrPaint&);
        const GrFragmentProcessor* next();

    private:
        SkSTArray<4, const GrFragmentProcessor*, true> fFPStack;
    };
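
    // Usage sketch: walking an FP and all of its descendants in pre-order. `fp` is a
    // placeholder std::unique_ptr<GrFragmentProcessor>:
    //
    //     GrFragmentProcessor::Iter iter(fp.get());
    //     while (const GrFragmentProcessor* cur = iter.next()) {
    //         // visit `cur`
    //     }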

    /**
     * Iterates over all the Ts owned by a GrFragmentProcessor and its children or over all the Ts
     * owned by the forest of GrFragmentProcessors in a GrPipeline. FPs are visited in the same
     * order as Iter and each of an FP's Ts is visited in order.
     */
    template <typename T, int (GrFragmentProcessor::*COUNT)() const,
              const T& (GrFragmentProcessor::*GET)(int) const>
    class FPItemIter : public SkNoncopyable {
    public:
        explicit FPItemIter(const GrFragmentProcessor* fp)
                : fCurrFP(nullptr)
                , fCTIdx(0)
                , fFPIter(fp) {
            fCurrFP = fFPIter.next();
        }
        explicit FPItemIter(const GrPipeline& pipeline)
                : fCurrFP(nullptr)
                , fCTIdx(0)
                , fFPIter(pipeline) {
            fCurrFP = fFPIter.next();
        }

        const T* next() {
            if (!fCurrFP) {
                return nullptr;
            }
            while (fCTIdx == (fCurrFP->*COUNT)()) {
                fCTIdx = 0;
                fCurrFP = fFPIter.next();
                if (!fCurrFP) {
                    return nullptr;
                }
            }
            return &(fCurrFP->*GET)(fCTIdx++);
        }

    private:
        const GrFragmentProcessor* fCurrFP;
        int fCTIdx;
        GrFragmentProcessor::Iter fFPIter;
    };

    using CoordTransformIter = FPItemIter<GrCoordTransform,
                                          &GrFragmentProcessor::numCoordTransforms,
                                          &GrFragmentProcessor::coordTransform>;

    using TextureAccessIter = FPItemIter<TextureSampler,
                                         &GrFragmentProcessor::numTextureSamplers,
                                         &GrFragmentProcessor::textureSampler>;
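
    // Usage sketch: visiting every coord transform in an FP tree, in the same order the
    // program builder will see them. `fp` is a placeholder pointer:
    //
    //     GrFragmentProcessor::CoordTransformIter ctIter(fp.get());
    //     while (const GrCoordTransform* ct = ctIter.next()) {
    //         // inspect `ct`
    //     }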

    void visitProxies(const GrOp::VisitProxyFunc& func);

protected:
    enum OptimizationFlags : uint32_t {
        kNone_OptimizationFlags,
        kCompatibleWithCoverageAsAlpha_OptimizationFlag = 0x1,
        kPreservesOpaqueInput_OptimizationFlag = 0x2,
        kConstantOutputForConstantInput_OptimizationFlag = 0x4,
        kAll_OptimizationFlags = kCompatibleWithCoverageAsAlpha_OptimizationFlag |
                                 kPreservesOpaqueInput_OptimizationFlag |
                                 kConstantOutputForConstantInput_OptimizationFlag
    };
    GR_DECL_BITFIELD_OPS_FRIENDS(OptimizationFlags)

    /**
     * Can be used as a helper to decide which fragment processor OptimizationFlags should be set.
     * This assumes that the subclass output color will be a modulation of the input color with a
     * value read from a texture of the passed config and that the texture contains premultiplied
     * color or alpha values that are in range.
     *
     * Since there are multiple ways in which a sampler may have its coordinates clamped or wrapped,
     * callers must determine on their own if the sampling uses a decal strategy in any way, in
     * which case the texture may become transparent regardless of the pixel config.
     */
    static OptimizationFlags ModulateForSamplerOptFlags(GrPixelConfig config, bool samplingDecal) {
        if (samplingDecal) {
            return kCompatibleWithCoverageAsAlpha_OptimizationFlag;
        } else {
            return ModulateForClampedSamplerOptFlags(config);
        }
    }

    // As above, but callers should somehow ensure or assert their sampler still uses clamping
    static OptimizationFlags ModulateForClampedSamplerOptFlags(GrPixelConfig config) {
        if (GrPixelConfigIsOpaque(config)) {
            return kCompatibleWithCoverageAsAlpha_OptimizationFlag |
                   kPreservesOpaqueInput_OptimizationFlag;
        } else {
            return kCompatibleWithCoverageAsAlpha_OptimizationFlag;
        }
    }
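
    // Usage sketch: a hypothetical texture-modulating subclass choosing its flags at
    // construction. `MyTextureEffect`, `kMyTextureEffect_ClassID`, `proxy`, and `decal` are
    // placeholders, not part of this header:
    //
    //     MyTextureEffect(sk_sp<GrTextureProxy> proxy, bool decal)
    //             : INHERITED(kMyTextureEffect_ClassID,
    //                         ModulateForSamplerOptFlags(proxy->config(), decal)) { /* ... */ }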

    GrFragmentProcessor(ClassID classID, OptimizationFlags optimizationFlags)
            : INHERITED(classID)
            , fFlags(optimizationFlags) {
        SkASSERT((fFlags & ~kAll_OptimizationFlags) == 0);
    }

    OptimizationFlags optimizationFlags() const {
        return static_cast<OptimizationFlags>(kAll_OptimizationFlags & fFlags);
    }

    /** Useful when you can't call fp->optimizationFlags() on a base class object from a subclass.*/
    static OptimizationFlags ProcessorOptimizationFlags(const GrFragmentProcessor* fp) {
        return fp->optimizationFlags();
    }

    /**
     * This allows one subclass to access another subclass's implementation of
     * constantOutputForConstantInput. It must only be called when
     * hasConstantOutputForConstantInput() is known to be true.
     */
    static SkPMColor4f ConstantOutputForConstantInput(const GrFragmentProcessor& fp,
                                                      const SkPMColor4f& input) {
        SkASSERT(fp.hasConstantOutputForConstantInput());
        return fp.constantOutputForConstantInput(input);
    }

    /**
     * Fragment Processor subclasses call this from their constructor to register coordinate
     * transformations. Coord transforms provide a mechanism for a processor to receive coordinates
     * in their FS code. The matrix expresses a transformation from local space. For a given
     * fragment the matrix will be applied to the local coordinate that maps to the fragment.
     *
     * When the transformation has perspective, the transformed coordinates will have
     * 3 components. Otherwise they'll have 2.
     *
     * This must only be called from the constructor because GrProcessors are immutable. The
     * processor subclass manages the lifetime of the transformations (this function only stores a
     * pointer). The GrCoordTransform is typically a member field of the GrProcessor subclass.
     *
     * A processor subclass that has multiple methods of construction should always add its coord
     * transforms in a consistent order. The non-virtual implementation of isEqual() automatically
     * compares transforms and will assume they line up across the two processor instances.
     */
    void addCoordTransform(const GrCoordTransform*);

    /**
     * FragmentProcessor subclasses call this from their constructor to register any child
     * FragmentProcessors they have. This must be called AFTER all texture accesses and coord
     * transforms have been added.
     * This is for processors whose shader code will be composed of nested processors whose output
     * colors will be combined somehow to produce its output color. Registering these child
     * processors will allow the ProgramBuilder to automatically handle their transformed coords and
     * texture accesses and mangle their uniform and output color names.
     */
    int registerChildProcessor(std::unique_ptr<GrFragmentProcessor> child);
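
    // Usage sketch: a hypothetical subclass constructor wiring up one coord transform and one
    // child. `MyEffect`, `kMyEffect_ClassID`, and the GrCoordTransform member `fCoordTransform`
    // are placeholders:
    //
    //     MyEffect(std::unique_ptr<GrFragmentProcessor> child, const SkMatrix& m)
    //             : INHERITED(kMyEffect_ClassID, kNone_OptimizationFlags)
    //             , fCoordTransform(m) {
    //         this->addCoordTransform(&fCoordTransform);
    //         this->registerChildProcessor(std::move(child));
    //     }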

    void setTextureSamplerCnt(int cnt) {
        SkASSERT(cnt >= 0);
        fTextureSamplerCnt = cnt;
    }

    /**
     * Helper for implementing onTextureSampler(). E.g.:
     * return IthTextureSampler(i, fMyFirstSampler, fMySecondSampler, fMyThirdSampler);
     */
    template <typename... Args>
    static const TextureSampler& IthTextureSampler(int i, const TextureSampler& samp0,
                                                   const Args&... samps) {
        return (0 == i) ? samp0 : IthTextureSampler(i - 1, samps...);
    }
    inline static const TextureSampler& IthTextureSampler(int i);
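
    // Usage sketch: a hypothetical subclass with two samplers (`fColorSampler` and
    // `fMaskSampler` are placeholder TextureSampler members). The constructor would call
    // this->setTextureSamplerCnt(2), and the override would be:
    //
    //     const TextureSampler& onTextureSampler(int i) const override {
    //         return IthTextureSampler(i, fColorSampler, fMaskSampler);
    //     }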

private:
    virtual SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& /* inputColor */) const {
        SK_ABORT("Subclass must override this if advertising this optimization.");
        return SK_PMColor4fTRANSPARENT;
    }

    /** Returns a new instance of the appropriate *GL* implementation class
        for the given GrFragmentProcessor; caller is responsible for deleting
        the object. */
    virtual GrGLSLFragmentProcessor* onCreateGLSLInstance() const = 0;

    /** Implemented using GrGLSLFragmentProcessor::GenKey as described in this class's comment. */
    virtual void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const = 0;

    /**
     * Subclass implements this to support isEqual(). It will only be called if it is known that
     * the two processors are of the same subclass (i.e. they return the same object from
     * getFactory()). The processor subclass should not compare its coord transforms as that will
     * be performed automatically in the non-virtual isEqual().
     */
    virtual bool onIsEqual(const GrFragmentProcessor&) const = 0;

    virtual const TextureSampler& onTextureSampler(int) const { return IthTextureSampler(0); }

    bool hasSameTransforms(const GrFragmentProcessor&) const;

    enum PrivateFlags {
        kFirstPrivateFlag = kAll_OptimizationFlags + 1,
        kUsesLocalCoords_Flag = kFirstPrivateFlag,
    };

    mutable uint32_t fFlags = 0;
    int fTextureSamplerCnt = 0;

    SkSTArray<4, const GrCoordTransform*, true> fCoordTransforms;
    SkSTArray<1, std::unique_ptr<GrFragmentProcessor>, true> fChildProcessors;

    typedef GrProcessor INHERITED;
};

/**
 * Used to represent a texture that is required by a GrFragmentProcessor. It holds a GrTextureProxy
 * along with an associated GrSamplerState. TextureSamplers don't perform any coord manipulation to
 * account for texture origin.
 */
class GrFragmentProcessor::TextureSampler {
public:
    TextureSampler() = default;

    /**
     * This copy constructor is used by GrFragmentProcessor::clone() implementations. The copy
     * always takes a new ref on the texture proxy as the new fragment processor will not yet be
     * in pending execution state.
     */
    explicit TextureSampler(const TextureSampler& that)
            : fProxy(that.fProxy)
            , fSamplerState(that.fSamplerState) {}

    TextureSampler(sk_sp<GrTextureProxy>, const GrSamplerState&);

    explicit TextureSampler(sk_sp<GrTextureProxy>,
                            GrSamplerState::Filter = GrSamplerState::Filter::kNearest,
                            GrSamplerState::WrapMode wrapXAndY = GrSamplerState::WrapMode::kClamp);

    TextureSampler& operator=(const TextureSampler&) = delete;

    void reset(sk_sp<GrTextureProxy>, const GrSamplerState&);
    void reset(sk_sp<GrTextureProxy>,
               GrSamplerState::Filter = GrSamplerState::Filter::kNearest,
               GrSamplerState::WrapMode wrapXAndY = GrSamplerState::WrapMode::kClamp);

    bool operator==(const TextureSampler& that) const {
        return this->proxy()->underlyingUniqueID() == that.proxy()->underlyingUniqueID() &&
               fSamplerState == that.fSamplerState;
    }

    bool operator!=(const TextureSampler& other) const { return !(*this == other); }

    SkDEBUGCODE(bool isInstantiated() const { return fProxy->isInstantiated(); })

    // 'peekTexture' should only ever be called after a successful 'instantiate' call
    GrTexture* peekTexture() const {
        SkASSERT(fProxy->isInstantiated());
        return fProxy->peekTexture();
    }

    GrTextureProxy* proxy() const { return fProxy.get(); }
    const GrSamplerState& samplerState() const { return fSamplerState; }
    const GrSwizzle& swizzle() const { return this->proxy()->textureSwizzle(); }

    bool isInitialized() const { return SkToBool(fProxy.get()); }

private:
    sk_sp<GrTextureProxy> fProxy;
    GrSamplerState fSamplerState;
};
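
// Usage sketch: initializing a hypothetical subclass sampler member with bilinear filtering
// and repeat wrapping. `fSampler` and `proxy` (an sk_sp<GrTextureProxy>) are placeholders:
//
//     fSampler.reset(std::move(proxy),
//                    GrSamplerState::Filter::kBilerp,
//                    GrSamplerState::WrapMode::kRepeat);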

//////////////////////////////////////////////////////////////////////////////

const GrFragmentProcessor::TextureSampler& GrFragmentProcessor::IthTextureSampler(int i) {
    SK_ABORT("Illegal texture sampler index");
    static const TextureSampler kBogus;
    return kBogus;
}

GR_MAKE_BITFIELD_OPS(GrFragmentProcessor::OptimizationFlags)

#endif