/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrTypes_DEFINED
#define GrTypes_DEFINED

#include "include/core/SkMath.h"
#include "include/core/SkTypes.h"
#include "include/gpu/GrConfig.h"

class GrBackendSemaphore;
class SkImage;
class SkSurface;

////////////////////////////////////////////////////////////////////////////////
/**
 * Defines overloaded bitwise operators to make it easier to use an enum as a
 * bitfield.
 */
#define GR_MAKE_BITFIELD_OPS(X) \
    inline X operator |(X a, X b) { \
        return (X) (+a | +b); \
    } \
    inline X& operator |=(X& a, X b) { \
        return (a = a | b); \
    } \
    inline X operator &(X a, X b) { \
        return (X) (+a & +b); \
    } \
    inline X& operator &=(X& a, X b) { \
        return (a = a & b); \
    } \
    template <typename T> \
    inline X operator &(T a, X b) { \
        return (X) (+a & +b); \
    } \
    template <typename T> \
    inline X operator &(X a, T b) { \
        return (X) (+a & +b); \
    }
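
// Illustrative usage (not part of the original header; GrExampleFlags is a
// hypothetical enum). The macro relies on unary '+' to promote the unscoped
// enum to int before applying the bitwise operator:
//
//     enum GrExampleFlags {
//         kNone_GrExampleFlag = 0,
//         kA_GrExampleFlag    = 1 << 0,
//         kB_GrExampleFlag    = 1 << 1,
//     };
//     GR_MAKE_BITFIELD_OPS(GrExampleFlags)
//
//     GrExampleFlags f = kA_GrExampleFlag | kB_GrExampleFlag;  // kA and kB set
//     f &= kA_GrExampleFlag;                                   // only kA remains
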
#define GR_DECL_BITFIELD_OPS_FRIENDS(X) \
    friend X operator |(X a, X b); \
    friend X& operator |=(X& a, X b); \
    \
    friend X operator &(X a, X b); \
    friend X& operator &=(X& a, X b); \
    \
    template <typename T> \
    friend X operator &(T a, X b); \
    \
    template <typename T> \
    friend X operator &(X a, T b);
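
// Illustrative usage (hypothetical class; not in the original header). The
// friend declarations let the free operators created by GR_MAKE_BITFIELD_OPS
// refer to a private nested enum:
//
//     class GrExampleObject {
//     private:
//         enum Flags {
//             kDirty_Flag  = 1 << 0,
//             kCached_Flag = 1 << 1,
//         };
//         GR_DECL_BITFIELD_OPS_FRIENDS(Flags)
//     };
//     GR_MAKE_BITFIELD_OPS(GrExampleObject::Flags)
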
/**
 * Wraps a C++11 enum that we use as a bitfield, and enables a limited amount of
 * masking with type safety. Instantiated with the ~ operator.
 */
template<typename TFlags> class GrTFlagsMask {
public:
    constexpr explicit GrTFlagsMask(TFlags value) : GrTFlagsMask(static_cast<int>(value)) {}
    constexpr explicit GrTFlagsMask(int value) : fValue(value) {}

    constexpr int value() const { return fValue; }

private:
    const int fValue;
};

// Or-ing a mask always returns another mask.
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator|(GrTFlagsMask<TFlags> a,
                                                                   GrTFlagsMask<TFlags> b) {
    return GrTFlagsMask<TFlags>(a.value() | b.value());
}
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator|(GrTFlagsMask<TFlags> a,
                                                                   TFlags b) {
    return GrTFlagsMask<TFlags>(a.value() | static_cast<int>(b));
}
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator|(TFlags a,
                                                                   GrTFlagsMask<TFlags> b) {
    return GrTFlagsMask<TFlags>(static_cast<int>(a) | b.value());
}
template<typename TFlags> inline GrTFlagsMask<TFlags>& operator|=(GrTFlagsMask<TFlags>& a,
                                                                  GrTFlagsMask<TFlags> b) {
    return (a = a | b);
}

// And-ing two masks returns another mask; and-ing one with regular flags returns flags.
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator&(GrTFlagsMask<TFlags> a,
                                                                   GrTFlagsMask<TFlags> b) {
    return GrTFlagsMask<TFlags>(a.value() & b.value());
}
template<typename TFlags> constexpr TFlags operator&(GrTFlagsMask<TFlags> a, TFlags b) {
    return static_cast<TFlags>(a.value() & static_cast<int>(b));
}
template<typename TFlags> constexpr TFlags operator&(TFlags a, GrTFlagsMask<TFlags> b) {
    return static_cast<TFlags>(static_cast<int>(a) & b.value());
}
template<typename TFlags> inline TFlags& operator&=(TFlags& a, GrTFlagsMask<TFlags> b) {
    return (a = a & b);
}

/**
 * Defines bitwise operators that make it possible to use an enum class as a
 * basic bitfield.
 */
#define GR_MAKE_BITFIELD_CLASS_OPS(X) \
    constexpr GrTFlagsMask<X> operator~(X a) { \
        return GrTFlagsMask<X>(~static_cast<int>(a)); \
    } \
    constexpr X operator|(X a, X b) { \
        return static_cast<X>(static_cast<int>(a) | static_cast<int>(b)); \
    } \
    inline X& operator|=(X& a, X b) { \
        return (a = a | b); \
    } \
    constexpr bool operator&(X a, X b) { \
        return SkToBool(static_cast<int>(a) & static_cast<int>(b)); \
    }
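
// Illustrative usage (hypothetical enum class, not in the original header).
// operator~ produces a GrTFlagsMask, which can then be and-ed back with flags:
//
//     enum class GrExampleFlag {
//         kNone = 0,
//         kA    = 1 << 0,
//         kB    = 1 << 1,
//     };
//     GR_MAKE_BITFIELD_CLASS_OPS(GrExampleFlag)
//
//     GrExampleFlag f = GrExampleFlag::kA | GrExampleFlag::kB;
//     f &= ~GrExampleFlag::kA;                   // clears kA, keeps kB
//     if (f & GrExampleFlag::kB) { /* true */ }  // operator& returns bool
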
#define GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(X) \
    friend constexpr GrTFlagsMask<X> operator ~(X); \
    friend constexpr X operator |(X, X); \
    friend X& operator |=(X&, X); \
    friend constexpr bool operator &(X, X)

////////////////////////////////////////////////////////////////////////////////

// compile time versions of min/max
#define GR_CT_MAX(a, b) (((b) < (a)) ? (a) : (b))
#define GR_CT_MIN(a, b) (((b) < (a)) ? (b) : (a))

/**
 * divide, rounding up
 */
static inline constexpr int32_t GrIDivRoundUp(int x, int y) {
    SkASSERT(y > 0);
    return (x + (y-1)) / y;
}
static inline constexpr uint32_t GrUIDivRoundUp(uint32_t x, uint32_t y) {
    return (x + (y-1)) / y;
}
static inline constexpr size_t GrSizeDivRoundUp(size_t x, size_t y) { return (x + (y - 1)) / y; }
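
// For example (illustrative): GrUIDivRoundUp(10, 4) == 3, since 10/4 == 2.5
// rounds up to 3, while GrUIDivRoundUp(8, 4) == 2 exactly.
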
/**
 * align up
 */
static inline constexpr uint32_t GrUIAlignUp(uint32_t x, uint32_t alignment) {
    return GrUIDivRoundUp(x, alignment) * alignment;
}
static inline constexpr size_t GrSizeAlignUp(size_t x, size_t alignment) {
    return GrSizeDivRoundUp(x, alignment) * alignment;
}

/**
 * amount of pad needed to align up
 */
static inline constexpr uint32_t GrUIAlignUpPad(uint32_t x, uint32_t alignment) {
    return (alignment - x % alignment) % alignment;
}
static inline constexpr size_t GrSizeAlignUpPad(size_t x, size_t alignment) {
    return (alignment - x % alignment) % alignment;
}

/**
 * align down
 */
static inline constexpr uint32_t GrUIAlignDown(uint32_t x, uint32_t alignment) {
    return (x / alignment) * alignment;
}
static inline constexpr size_t GrSizeAlignDown(size_t x, uint32_t alignment) {
    return (x / alignment) * alignment;
}
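
// Worked example (illustrative), for x = 13 and alignment = 8:
//     GrUIAlignUp(13, 8)    == 16   // next multiple of 8 at or above 13
//     GrUIAlignUpPad(13, 8) == 3    // 13 + 3 == 16
//     GrUIAlignDown(13, 8)  == 8    // previous multiple of 8 at or below 13
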
///////////////////////////////////////////////////////////////////////////////

/**
 * Possible 3D APIs that may be used by Ganesh.
 */
enum class GrBackendApi : unsigned {
    kMetal,
    kDawn,
    kOpenGL,
    kVulkan,
    /**
     * Mock is a backend that does not draw anything. It is used for unit tests
     * and to measure CPU overhead.
     */
    kMock,

    /**
     * Added here to support the legacy GrBackend enum value and clients who referenced it using
     * GrBackend::kOpenGL_GrBackend.
     */
    kOpenGL_GrBackend = kOpenGL,
};

/**
 * Previously the above enum was not an enum class but a normal enum. To support the legacy use of
 * the enum values we define them below so that no clients break.
 */
typedef GrBackendApi GrBackend;

static constexpr GrBackendApi kMetal_GrBackend = GrBackendApi::kMetal;
static constexpr GrBackendApi kVulkan_GrBackend = GrBackendApi::kVulkan;
static constexpr GrBackendApi kMock_GrBackend = GrBackendApi::kMock;

///////////////////////////////////////////////////////////////////////////////

/**
 * Used to say whether a texture has mip levels allocated or not.
 */
enum class GrMipMapped : bool {
    kNo = false,
    kYes = true
};

/*
 * Can a GrBackendObject be rendered to?
 */
enum class GrRenderable : bool {
    kNo = false,
    kYes = true
};
/*
 * Used to say whether a texture is backed by protected memory.
 */
enum class GrProtected : bool {
    kNo = false,
    kYes = true
};
///////////////////////////////////////////////////////////////////////////////

/**
 * GPU SkImage and SkSurfaces can be stored such that (0, 0) in texture space may correspond to
 * either the top-left or bottom-left content pixel.
 */
enum GrSurfaceOrigin : int {
    kTopLeft_GrSurfaceOrigin,
    kBottomLeft_GrSurfaceOrigin,
};
/**
 * A GrContext's cache of backend context state can be partially invalidated.
 * These enums are specific to the GL backend and we'd add a new set for an alternative backend.
 */
enum GrGLBackendState {
    kRenderTarget_GrGLBackendState     = 1 << 0,
    // Also includes samplers bound to texture units.
    kTextureBinding_GrGLBackendState   = 1 << 1,
    // View state stands for scissor and viewport
    kView_GrGLBackendState             = 1 << 2,
    kBlend_GrGLBackendState            = 1 << 3,
    kMSAAEnable_GrGLBackendState       = 1 << 4,
    kVertex_GrGLBackendState           = 1 << 5,
    kStencil_GrGLBackendState          = 1 << 6,
    kPixelStore_GrGLBackendState       = 1 << 7,
    kProgram_GrGLBackendState          = 1 << 8,
    kFixedFunction_GrGLBackendState    = 1 << 9,
    kMisc_GrGLBackendState             = 1 << 10,
    kPathRendering_GrGLBackendState    = 1 << 11,
    kALL_GrGLBackendState              = 0xffff
};
/**
 * This value translates to resetting all the context state for any backend.
 */
static const uint32_t kAll_GrBackendState = 0xffffffff;
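
// Illustrative usage (sketch; assumes GrContext::resetContext(uint32_t), which
// takes a bitfield of these state flags): tell Skia that external code has
// touched the GL texture bindings and blend state since the last flush.
//
//     context->resetContext(kTextureBinding_GrGLBackendState |
//                           kBlend_GrGLBackendState);
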
enum GrFlushFlags {
    kNone_GrFlushFlags = 0,
    // flush will wait until all submitted GPU work is finished before returning.
    kSyncCpu_GrFlushFlag = 0x1,
};

typedef void* GrGpuFinishedContext;
typedef void (*GrGpuFinishedProc)(GrGpuFinishedContext finishedContext);
/**
 * Struct to supply options to flush calls.
 *
 * After issuing all commands, fNumSemaphores semaphores will be signaled by the gpu. The client
 * passes in an array of fNumSemaphores GrBackendSemaphores. In general these GrBackendSemaphores
 * can be either initialized or not. If they are initialized, the backend uses the passed-in
 * semaphores. If a semaphore is not initialized, a new semaphore is created and the
 * GrBackendSemaphore object is initialized with that semaphore.
 *
 * The client will own and be responsible for deleting the underlying semaphores that are stored
 * and returned in initialized GrBackendSemaphore objects. The GrBackendSemaphore objects
 * themselves can be deleted as soon as this function returns.
 *
 * If a finishedProc is provided, the finishedProc will be called when all work submitted to the
 * gpu from this flush call and all previous flush calls has finished on the GPU. If the flush
 * call fails due to an error and nothing ends up getting sent to the GPU, the finished proc is
 * called immediately.
 */
struct GrFlushInfo {
    GrFlushFlags fFlags = kNone_GrFlushFlags;
    int fNumSemaphores = 0;
    GrBackendSemaphore* fSignalSemaphores = nullptr;
    GrGpuFinishedProc fFinishedProc = nullptr;
    GrGpuFinishedContext fFinishedContext = nullptr;
};
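
// Illustrative usage (sketch; assumes a GrContext* and a
// flush(const GrFlushInfo&) overload from this era of the API):
//
//     GrBackendSemaphore semaphores[2];  // uninitialized; Skia will create them
//     GrFlushInfo info;
//     info.fFlags = kSyncCpu_GrFlushFlag;
//     info.fNumSemaphores = 2;
//     info.fSignalSemaphores = semaphores;
//     GrSemaphoresSubmitted submitted = context->flush(info);
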
/**
 * Enum used as the return value when flushing with semaphores so the client knows whether the
 * semaphores were submitted to the GPU or not.
 */
enum class GrSemaphoresSubmitted : bool {
    kNo = false,
    kYes = true
};
/**
 * Array of SkImages and SkSurfaces which Skia will prepare for external use when passed into a
 * flush call on GrContext. All the SkImages and SkSurfaces must be GPU backed.
 *
 * If fPrepareSurfaceForPresent is not nullptr, then it must be an array the size of fNumSurfaces.
 * Each entry in the array corresponds to the SkSurface at the same index in the fSurfaces array.
 * If an entry is true, then that surface will be prepared for both external use and present.
 *
 * Currently this only has an effect if the backend API is Vulkan. In this case, all the
 * underlying VkImages associated with the SkImages and SkSurfaces will be transitioned into the
 * VkQueueFamily in which they were originally wrapped or created. This allows a client to wrap a
 * VkImage from a queue which is different from the graphics queue and then have Skia transition
 * it back to that queue without needing to delete the SkImage or SkSurface. If an SkSurface is
 * also flagged to be prepared for present, then its VkImageLayout will be set to
 * VK_IMAGE_LAYOUT_PRESENT_SRC_KHR if the VK_KHR_swapchain extension has been enabled for the
 * GrContext and the original queue is not VK_QUEUE_FAMILY_EXTERNAL or VK_QUEUE_FAMILY_FOREIGN_EXT.
 *
 * If an SkSurface or SkImage is used again, it will be transitioned back to the graphics queue
 * and whatever layout is needed for its use.
 */
struct GrPrepareForExternalIORequests {
    int fNumImages = 0;
    SkImage** fImages = nullptr;
    int fNumSurfaces = 0;
    SkSurface** fSurfaces = nullptr;
    bool* fPrepareSurfaceForPresent = nullptr;

    bool hasRequests() const { return fNumImages || fNumSurfaces; }
};
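
// Illustrative usage (sketch; assumes GPU-backed sk_sp<SkSurface> objects named
// surface0/surface1, and a flush overload of this era that accepts the struct):
//
//     SkSurface* surfaces[] = {surface0.get(), surface1.get()};
//     bool preparePresent[] = {true, false};  // present only the first surface
//     GrPrepareForExternalIORequests io;
//     io.fNumSurfaces = 2;
//     io.fSurfaces = surfaces;
//     io.fPrepareSurfaceForPresent = preparePresent;
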
#endif