GrGpu.cpp

/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrGpu.h"

#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrBackendSurface.h"
#include "include/gpu/GrContext.h"
#include "src/core/SkMathPriv.h"
#include "src/gpu/GrAuditTrail.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrDataUtils.h"
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrMesh.h"
#include "src/gpu/GrPathRendering.h"
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTargetPriv.h"
#include "src/gpu/GrResourceCache.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSemaphore.h"
#include "src/gpu/GrStencilAttachment.h"
#include "src/gpu/GrStencilSettings.h"
#include "src/gpu/GrSurfacePriv.h"
#include "src/gpu/GrTexturePriv.h"
#include "src/gpu/GrTextureProxyPriv.h"
#include "src/gpu/GrTracing.h"
#include "src/utils/SkJSONWriter.h"

////////////////////////////////////////////////////////////////////////////////

GrGpu::GrGpu(GrContext* context) : fResetBits(kAll_GrBackendState), fContext(context) {}

GrGpu::~GrGpu() {}

void GrGpu::disconnect(DisconnectType) {}

////////////////////////////////////////////////////////////////////////////////

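// Determines whether a texture sampled with repeat wrap mode must first be copied: either
// the hardware cannot tile non-power-of-two textures (the copy is rounded up to the next
// powers of two and scaleAdjust records the resulting scale factors), or the proxy's format
// only supports restricted sampling.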
bool GrGpu::IsACopyNeededForRepeatWrapMode(const GrCaps* caps, GrTextureProxy* texProxy,
                                           int width, int height,
                                           GrSamplerState::Filter filter,
                                           GrTextureProducer::CopyParams* copyParams,
                                           SkScalar scaleAdjust[2]) {
    if (!caps->npotTextureTileSupport() && (!SkIsPow2(width) || !SkIsPow2(height))) {
        SkASSERT(scaleAdjust);
        copyParams->fWidth = GrNextPow2(width);
        copyParams->fHeight = GrNextPow2(height);
        scaleAdjust[0] = ((SkScalar)copyParams->fWidth) / width;
        scaleAdjust[1] = ((SkScalar)copyParams->fHeight) / height;
        switch (filter) {
            case GrSamplerState::Filter::kNearest:
                copyParams->fFilter = GrSamplerState::Filter::kNearest;
                break;
            case GrSamplerState::Filter::kBilerp:
            case GrSamplerState::Filter::kMipMap:
                // We are only ever scaling up so no reason to ever indicate kMipMap.
                copyParams->fFilter = GrSamplerState::Filter::kBilerp;
                break;
        }
        return true;
    }

    if (texProxy) {
        // If the texture format itself doesn't support repeat wrap mode or mipmapping (and
        // those capabilities are required) force a copy.
        if (texProxy->hasRestrictedSampling()) {
            copyParams->fFilter = GrSamplerState::Filter::kNearest;
            copyParams->fWidth = texProxy->width();
            copyParams->fHeight = texProxy->height();
            return true;
        }
    }

    return false;
}

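// Determines whether a copy is needed to satisfy a mipmap filter: if mipmaps are requested
// and supported by the caps but the proxy has no mip levels allocated, the texture must be
// copied into a mipped one.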
bool GrGpu::IsACopyNeededForMips(const GrCaps* caps, const GrTextureProxy* texProxy,
                                 GrSamplerState::Filter filter,
                                 GrTextureProducer::CopyParams* copyParams) {
    SkASSERT(texProxy);
    bool willNeedMips = GrSamplerState::Filter::kMipMap == filter && caps->mipMapSupport();
    // If the texture format itself doesn't support mipmapping (and those capabilities are
    // required) force a copy.
    if (willNeedMips && texProxy->mipMapped() == GrMipMapped::kNo) {
        copyParams->fFilter = GrSamplerState::Filter::kNearest;
        copyParams->fWidth = texProxy->width();
        copyParams->fHeight = texProxy->height();
        return true;
    }

    return false;
}

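// Validates an array of mip levels against the caps: every level with data must have legal
// row bytes, dimensions must halve from level to level down to 1x1, and pixel data must be
// either absent, present for just the base level, or present for the full mip stack
// (mandatory for all levels when mustHaveDataForAllLevels is set).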
static bool validate_levels(int w, int h, const GrMipLevel texels[], int mipLevelCount, int bpp,
                            const GrCaps* caps, bool mustHaveDataForAllLevels = false) {
    SkASSERT(mipLevelCount > 0);
    bool hasBasePixels = texels[0].fPixels;
    int levelsWithPixelsCnt = 0;
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; ++currentMipLevel) {
        if (texels[currentMipLevel].fPixels) {
            const size_t minRowBytes = w * bpp;
            if (caps->writePixelsRowBytesSupport()) {
                if (texels[currentMipLevel].fRowBytes < minRowBytes) {
                    return false;
                }
                if (texels[currentMipLevel].fRowBytes % bpp) {
                    return false;
                }
            } else {
                if (texels[currentMipLevel].fRowBytes != minRowBytes) {
                    return false;
                }
            }
            ++levelsWithPixelsCnt;
        }
        if (w == 1 && h == 1) {
            if (currentMipLevel != mipLevelCount - 1) {
                return false;
            }
        } else {
            w = std::max(w / 2, 1);
            h = std::max(h / 2, 1);
        }
    }
    // Either just a base layer or a full stack is required.
    if (mipLevelCount != 1 && (w != 1 || h != 1)) {
        return false;
    }
    // Can specify just the base, all levels, or no levels.
    if (!hasBasePixels) {
        return levelsWithPixelsCnt == 0;
    }
    if (levelsWithPixelsCnt == 1 && !mustHaveDataForAllLevels) {
        return true;
    }
    return levelsWithPixelsCnt == mipLevelCount;
}

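// Creates a new texture, optionally uploading initial texel data. Compressed configs are
// rejected here (createCompressedTexture handles those), the desc is validated against the
// caps, and the sample count is resolved to a supported value before the backend-specific
// onCreateTexture is invoked.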
sk_sp<GrTexture> GrGpu::createTexture(const GrSurfaceDesc& origDesc, GrRenderable renderable,
                                      int renderTargetSampleCnt, SkBudgeted budgeted,
                                      GrProtected isProtected, const GrMipLevel texels[],
                                      int mipLevelCount) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (GrPixelConfigIsCompressed(origDesc.fConfig)) {
        // Call GrGpu::createCompressedTexture.
        return nullptr;
    }

    GrSurfaceDesc desc = origDesc;
    GrMipMapped mipMapped = mipLevelCount > 1 ? GrMipMapped::kYes : GrMipMapped::kNo;
    if (!this->caps()->validateSurfaceDesc(desc, renderable, renderTargetSampleCnt, mipMapped)) {
        return nullptr;
    }

    if (renderable == GrRenderable::kYes) {
        renderTargetSampleCnt =
                this->caps()->getRenderTargetSampleCount(renderTargetSampleCnt, desc.fConfig);
    }
    // Attempt to catch un- or wrongly initialized sample counts.
    SkASSERT(renderTargetSampleCnt > 0 && renderTargetSampleCnt <= 64);

    bool mustHaveDataForAllLevels = this->caps()->createTextureMustSpecifyAllLevels();
    if (mipLevelCount) {
        int bpp = GrBytesPerPixel(desc.fConfig);
        if (!validate_levels(desc.fWidth, desc.fHeight, texels, mipLevelCount, bpp, this->caps(),
                             mustHaveDataForAllLevels)) {
            return nullptr;
        }
    } else if (mustHaveDataForAllLevels) {
        return nullptr;
    }

    this->handleDirtyContext();
    sk_sp<GrTexture> tex = this->onCreateTexture(desc, renderable, renderTargetSampleCnt, budgeted,
                                                 isProtected, texels, mipLevelCount);
    if (tex) {
        if (!this->caps()->reuseScratchTextures() && renderable == GrRenderable::kNo) {
            tex->resourcePriv().removeScratchKey();
        }
        fStats.incTextureCreates();
        if (mipLevelCount) {
            if (texels[0].fPixels) {
                fStats.incTextureUploads();
            }
        }
    }
    return tex;
}

sk_sp<GrTexture> GrGpu::createTexture(const GrSurfaceDesc& desc, GrRenderable renderable,
                                      int renderTargetSampleCnt, SkBudgeted budgeted,
                                      GrProtected isProtected) {
    return this->createTexture(desc, renderable, renderTargetSampleCnt, budgeted, isProtected,
                               nullptr, 0);
}

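// Creates a texture from pre-compressed data. Dimensions, config texturability, and data
// size are all checked against the caps before the call is forwarded to the backend.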
sk_sp<GrTexture> GrGpu::createCompressedTexture(int width, int height,
                                                SkImage::CompressionType compressionType,
                                                SkBudgeted budgeted, const void* data,
                                                size_t dataSize) {
    this->handleDirtyContext();
    if (width  < 1 || width  > this->caps()->maxTextureSize() ||
        height < 1 || height > this->caps()->maxTextureSize()) {
        return nullptr;
    }
    // Note if we relax the requirement that data must be provided then we must check
    // caps()->shouldInitializeTextures() here.
    if (!data) {
        return nullptr;
    }
    if (!this->caps()->isConfigTexturable(GrCompressionTypePixelConfig(compressionType))) {
        return nullptr;
    }
    if (dataSize < GrCompressedDataSize(compressionType, width, height)) {
        return nullptr;
    }
    return this->onCreateCompressedTexture(width, height, compressionType, budgeted, data);
}

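// Wraps a client-supplied backend texture in a GrTexture. The ioType may be read-only or
// read-write, but a write-only wrap is disallowed, and the format and dimensions must be
// within the caps' limits.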
sk_sp<GrTexture> GrGpu::wrapBackendTexture(const GrBackendTexture& backendTex,
                                           GrColorType colorType,
                                           GrWrapOwnership ownership, GrWrapCacheable cacheable,
                                           GrIOType ioType) {
    SkASSERT(ioType != kWrite_GrIOType);
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();
    SkASSERT(caps);

    if (!caps->isFormatTexturable(colorType, backendTex.getBackendFormat())) {
        return nullptr;
    }
    if (backendTex.width() > caps->maxTextureSize() ||
        backendTex.height() > caps->maxTextureSize()) {
        return nullptr;
    }

    SkASSERT(GrCaps::AreConfigsCompatible(backendTex.config(),
                                          caps->getConfigFromBackendFormat(
                                                  backendTex.getBackendFormat(),
                                                  colorType)));

    return this->onWrapBackendTexture(backendTex, colorType, ownership, cacheable, ioType);
}

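// Like wrapBackendTexture, but the result must also be usable as a render target: the format
// must be renderable at the requested sample count, and the dimensions are checked against
// the (possibly smaller) maximum render target size.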
sk_sp<GrTexture> GrGpu::wrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                     int sampleCnt, GrColorType colorType,
                                                     GrWrapOwnership ownership,
                                                     GrWrapCacheable cacheable) {
    this->handleDirtyContext();
    if (sampleCnt < 1) {
        return nullptr;
    }

    const GrCaps* caps = this->caps();

    SkASSERT(GrCaps::AreConfigsCompatible(backendTex.config(),
                                          caps->getConfigFromBackendFormat(
                                                  backendTex.getBackendFormat(),
                                                  colorType)));

    if (!caps->isFormatTexturable(colorType, backendTex.getBackendFormat()) ||
        !caps->getRenderTargetSampleCount(sampleCnt, colorType, backendTex.getBackendFormat())) {
        return nullptr;
    }

    if (backendTex.width() > caps->maxRenderTargetSize() ||
        backendTex.height() > caps->maxRenderTargetSize()) {
        return nullptr;
    }
    sk_sp<GrTexture> tex = this->onWrapRenderableBackendTexture(backendTex, sampleCnt, colorType,
                                                                ownership, cacheable);
    SkASSERT(!tex || tex->asRenderTarget());
    return tex;
}

sk_sp<GrRenderTarget> GrGpu::wrapBackendRenderTarget(const GrBackendRenderTarget& backendRT,
                                                     GrColorType colorType) {
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();

    SkASSERT(GrCaps::AreConfigsCompatible(backendRT.config(),
                                          caps->getConfigFromBackendFormat(
                                                  backendRT.getBackendFormat(),
                                                  colorType)));

    if (0 == caps->getRenderTargetSampleCount(backendRT.sampleCnt(), colorType,
                                              backendRT.getBackendFormat())) {
        return nullptr;
    }

    return this->onWrapBackendRenderTarget(backendRT, colorType);
}

sk_sp<GrRenderTarget> GrGpu::wrapBackendTextureAsRenderTarget(const GrBackendTexture& backendTex,
                                                              int sampleCnt,
                                                              GrColorType colorType) {
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();

    int maxSize = caps->maxTextureSize();
    if (backendTex.width() > maxSize || backendTex.height() > maxSize) {
        return nullptr;
    }

    SkASSERT(GrCaps::AreConfigsCompatible(backendTex.config(),
                                          caps->getConfigFromBackendFormat(
                                                  backendTex.getBackendFormat(),
                                                  colorType)));

    if (0 == caps->getRenderTargetSampleCount(sampleCnt, colorType,
                                              backendTex.getBackendFormat())) {
        return nullptr;
    }

    return this->onWrapBackendTextureAsRenderTarget(backendTex, sampleCnt, colorType);
}

sk_sp<GrRenderTarget> GrGpu::wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo,
                                                                 const GrVkDrawableInfo& vkInfo) {
    return this->onWrapVulkanSecondaryCBAsRenderTarget(imageInfo, vkInfo);
}

sk_sp<GrRenderTarget> GrGpu::onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo,
                                                                   const GrVkDrawableInfo& vkInfo) {
    // This is only supported on Vulkan so we default to returning nullptr here.
    return nullptr;
}

sk_sp<GrGpuBuffer> GrGpu::createBuffer(size_t size, GrGpuBufferType intendedType,
                                       GrAccessPattern accessPattern, const void* data) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    this->handleDirtyContext();
    sk_sp<GrGpuBuffer> buffer = this->onCreateBuffer(size, intendedType, accessPattern, data);
    // onCreateBuffer may fail and return nullptr; guard before touching the scratch key.
    if (buffer && !this->caps()->reuseScratchBuffers()) {
        buffer->resourcePriv().removeScratchKey();
    }
    return buffer;
}

bool GrGpu::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                        const SkIPoint& dstPoint, bool canDiscardOutsideDstRect) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(dst && src);

    if (dst->readOnly()) {
        return false;
    }

    this->handleDirtyContext();

    return this->onCopySurface(dst, src, srcRect, dstPoint, canDiscardOutsideDstRect);
}

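// Reads a rectangle of pixels from the surface into the caller's buffer. The read rect must
// lie inside the surface, rowBytes must satisfy the caps' row-byte rules, and compressed
// surfaces cannot be read back through this path.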
bool GrGpu::readPixels(GrSurface* surface, int left, int top, int width, int height,
                       GrColorType dstColorType, void* buffer, size_t rowBytes) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(surface);

    auto subRect = SkIRect::MakeXYWH(left, top, width, height);
    auto bounds = SkIRect::MakeWH(surface->width(), surface->height());
    if (!bounds.contains(subRect)) {
        return false;
    }

    size_t minRowBytes = SkToSizeT(GrColorTypeBytesPerPixel(dstColorType) * width);
    if (!this->caps()->readPixelsRowBytesSupport()) {
        if (rowBytes != minRowBytes) {
            return false;
        }
    } else {
        if (rowBytes < minRowBytes) {
            return false;
        }
        if (rowBytes % GrColorTypeBytesPerPixel(dstColorType)) {
            return false;
        }
    }

    if (GrPixelConfigIsCompressed(surface->config())) {
        return false;
    }

    this->handleDirtyContext();

    return this->onReadPixels(surface, left, top, width, height, dstColorType, buffer, rowBytes);
}

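// Uploads texel data to the surface. A single-level write may target any subrect of the
// surface; a mipped write must cover the entire surface. On success, didWriteToSurface
// updates dependent state (mip dirtiness, pending resolve).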
bool GrGpu::writePixels(GrSurface* surface, int left, int top, int width, int height,
                        GrColorType srcColorType, const GrMipLevel texels[], int mipLevelCount) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(surface);

    if (surface->readOnly()) {
        return false;
    }

    if (mipLevelCount == 0) {
        return false;
    } else if (mipLevelCount == 1) {
        // We require that if we are not mipped, then the write region is contained in the surface.
        auto subRect = SkIRect::MakeXYWH(left, top, width, height);
        auto bounds = SkIRect::MakeWH(surface->width(), surface->height());
        if (!bounds.contains(subRect)) {
            return false;
        }
    } else if (0 != left || 0 != top || width != surface->width() || height != surface->height()) {
        // We require that if the texels are mipped, then the write region is the entire surface.
        return false;
    }

    int bpp = GrColorTypeBytesPerPixel(srcColorType);
    if (!validate_levels(width, height, texels, mipLevelCount, bpp, this->caps())) {
        return false;
    }

    this->handleDirtyContext();
    if (this->onWritePixels(surface, left, top, width, height, srcColorType, texels,
                            mipLevelCount)) {
        SkIRect rect = SkIRect::MakeXYWH(left, top, width, height);
        this->didWriteToSurface(surface, kTopLeft_GrSurfaceOrigin, &rect, mipLevelCount);
        fStats.incTextureUploads();
        return true;
    }
    return false;
}

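// Uploads pixel data to a texture from a GPU transfer buffer rather than from CPU memory.
// The destination rect must lie inside the texture and rowBytes must satisfy the caps'
// row-byte rules for writes.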
bool GrGpu::transferPixelsTo(GrTexture* texture, int left, int top, int width, int height,
                             GrColorType bufferColorType, GrGpuBuffer* transferBuffer,
                             size_t offset, size_t rowBytes) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(texture);
    SkASSERT(transferBuffer);

    if (texture->readOnly()) {
        return false;
    }

    // We require that the write region is contained in the texture.
    SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
    SkIRect bounds = SkIRect::MakeWH(texture->width(), texture->height());
    if (!bounds.contains(subRect)) {
        return false;
    }

    int bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (this->caps()->writePixelsRowBytesSupport()) {
        if (rowBytes < SkToSizeT(bpp * width)) {
            return false;
        }
        if (rowBytes % bpp) {
            return false;
        }
    } else {
        if (rowBytes != SkToSizeT(bpp * width)) {
            return false;
        }
    }

    this->handleDirtyContext();
    if (this->onTransferPixelsTo(texture, left, top, width, height, bufferColorType,
                                 transferBuffer, offset, rowBytes)) {
        SkIRect rect = SkIRect::MakeXYWH(left, top, width, height);
        this->didWriteToSurface(texture, kTopLeft_GrSurfaceOrigin, &rect);
        fStats.incTransfersToTexture();
        return true;
    }
    return false;
}

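// Downloads a rectangle of pixels from a surface into a GPU transfer buffer. The buffer
// offset must respect the backend's alignment requirement for transfer-from operations.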
bool GrGpu::transferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
                               GrColorType bufferColorType, GrGpuBuffer* transferBuffer,
                               size_t offset) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(surface);
    SkASSERT(transferBuffer);
    SkASSERT(this->caps()->transferFromOffsetAlignment(bufferColorType));
    SkASSERT(offset % this->caps()->transferFromOffsetAlignment(bufferColorType) == 0);

    // We require that the read region is contained in the surface.
    SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
    SkIRect bounds = SkIRect::MakeWH(surface->width(), surface->height());
    if (!bounds.contains(subRect)) {
        return false;
    }

    this->handleDirtyContext();
    if (this->onTransferPixelsFrom(surface, left, top, width, height, bufferColorType,
                                   transferBuffer, offset)) {
        fStats.incTransfersFromSurface();
        return true;
    }
    return false;
}

bool GrGpu::regenerateMipMapLevels(GrTexture* texture) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(texture);
    SkASSERT(this->caps()->mipMapSupport());
    SkASSERT(texture->texturePriv().mipMapped() == GrMipMapped::kYes);
    SkASSERT(texture->texturePriv().mipMapsAreDirty());
    SkASSERT(!texture->asRenderTarget() || !texture->asRenderTarget()->needsResolve());
    if (texture->readOnly()) {
        return false;
    }
    if (this->onRegenerateMipMapLevels(texture)) {
        texture->texturePriv().markMipMapsClean();
        return true;
    }
    return false;
}

void GrGpu::resetTextureBindings() {
    this->handleDirtyContext();
    this->onResetTextureBindings();
}

void GrGpu::resolveRenderTarget(GrRenderTarget* target) {
    SkASSERT(target);
    this->handleDirtyContext();
    this->onResolveRenderTarget(target);
}

void GrGpu::didWriteToSurface(GrSurface* surface, GrSurfaceOrigin origin, const SkIRect* bounds,
                              uint32_t mipLevels) const {
    SkASSERT(surface);
    SkASSERT(!surface->readOnly());
    // Mark any MIP chain and resolve buffer as dirty if and only if there is a non-empty bounds.
    if (nullptr == bounds || !bounds->isEmpty()) {
        if (GrRenderTarget* target = surface->asRenderTarget()) {
            SkIRect flippedBounds;
            if (kBottomLeft_GrSurfaceOrigin == origin && bounds) {
                flippedBounds = {bounds->fLeft, surface->height() - bounds->fBottom,
                                 bounds->fRight, surface->height() - bounds->fTop};
                bounds = &flippedBounds;
            }
            target->flagAsNeedingResolve(bounds);
        }
        GrTexture* texture = surface->asTexture();
        if (texture && 1 == mipLevels) {
            texture->texturePriv().markMipMapsDirty();
        }
    }
}

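// Queries the backend for the render target's sample locations and maps them to a stable
// key via the sample pattern dictionary, so equivalent patterns share a key.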
int GrGpu::findOrAssignSamplePatternKey(GrRenderTarget* renderTarget) {
    SkASSERT(this->caps()->sampleLocationsSupport());
    SkASSERT(renderTarget->numSamples() > 1 ||
             (renderTarget->renderTargetPriv().getStencilAttachment() &&
              renderTarget->renderTargetPriv().getStencilAttachment()->numSamples() > 1));

    SkSTArray<16, SkPoint> sampleLocations;
    this->querySampleLocations(renderTarget, &sampleLocations);
    return fSamplePatternDictionary.findOrAssignSamplePatternKey(sampleLocations);
}

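// Ends a flush. If the backend supports semaphores, each requested signal semaphore is
// either wrapped (when the client supplied an initialized one) or newly created, and is
// inserted into the command stream before the backend's onFinishFlush runs.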
GrSemaphoresSubmitted GrGpu::finishFlush(GrSurfaceProxy* proxies[],
                                         int n,
                                         SkSurface::BackendSurfaceAccess access,
                                         const GrFlushInfo& info,
                                         const GrPrepareForExternalIORequests& externalRequests) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    this->stats()->incNumFinishFlushes();
    GrResourceProvider* resourceProvider = fContext->priv().resourceProvider();

    if (this->caps()->semaphoreSupport()) {
        for (int i = 0; i < info.fNumSemaphores; ++i) {
            sk_sp<GrSemaphore> semaphore;
            if (info.fSignalSemaphores[i].isInitialized()) {
                semaphore = resourceProvider->wrapBackendSemaphore(
                        info.fSignalSemaphores[i],
                        GrResourceProvider::SemaphoreWrapType::kWillSignal,
                        kBorrow_GrWrapOwnership);
            } else {
                semaphore = resourceProvider->makeSemaphore(false);
            }
            this->insertSemaphore(semaphore);

            if (!info.fSignalSemaphores[i].isInitialized()) {
                info.fSignalSemaphores[i] = semaphore->backendSemaphore();
            }
        }
    }
    this->onFinishFlush(proxies, n, access, info, externalRequests);
    return this->caps()->semaphoreSupport() ? GrSemaphoresSubmitted::kYes
                                            : GrSemaphoresSubmitted::kNo;
}

#ifdef SK_ENABLE_DUMP_GPU
void GrGpu::dumpJSON(SkJSONWriter* writer) const {
    writer->beginObject();

    // TODO: Is there anything useful in the base class to dump here?

    this->onDumpJSON(writer);

    writer->endObject();
}
#else
void GrGpu::dumpJSON(SkJSONWriter* writer) const { }
#endif

#if GR_TEST_UTILS

#if GR_GPU_STATS
void GrGpu::Stats::dump(SkString* out) {
    out->appendf("Render Target Binds: %d\n", fRenderTargetBinds);
    out->appendf("Shader Compilations: %d\n", fShaderCompilations);
    out->appendf("Textures Created: %d\n", fTextureCreates);
    out->appendf("Texture Uploads: %d\n", fTextureUploads);
    out->appendf("Transfers to Texture: %d\n", fTransfersToTexture);
    out->appendf("Transfers from Surface: %d\n", fTransfersFromSurface);
    out->appendf("Stencil Buffer Creates: %d\n", fStencilAttachmentCreates);
    out->appendf("Number of draws: %d\n", fNumDraws);
    out->appendf("Number of Scratch Textures reused: %d\n", fNumScratchTexturesReused);
}

void GrGpu::Stats::dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) {
    keys->push_back(SkString("render_target_binds")); values->push_back(fRenderTargetBinds);
    keys->push_back(SkString("shader_compilations")); values->push_back(fShaderCompilations);
}
#endif  // GR_GPU_STATS

#endif  // GR_TEST_UTILS