skia_output_surface_impl_on_gpu.cc

// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/viz/service/display_embedder/skia_output_surface_impl_on_gpu.h"

#include <memory>
#include <vector>

#include "base/atomic_sequence_num.h"
#include "base/bind.h"
#include "base/callback_forward.h"
#include "base/callback_helpers.h"
#include "base/debug/crash_logging.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/scoped_refptr.h"
#include "base/notreached.h"
#include "base/task/bind_post_task.h"
#include "base/threading/thread_checker.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "components/viz/common/features.h"
#include "components/viz/common/frame_sinks/blit_request.h"
#include "components/viz/common/frame_sinks/copy_output_request.h"
#include "components/viz/common/frame_sinks/copy_output_util.h"
#include "components/viz/common/gpu/vulkan_context_provider.h"
#include "components/viz/common/resources/release_callback.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "components/viz/common/skia_helper.h"
#include "components/viz/common/viz_utils.h"
#include "components/viz/service/display/output_surface_frame.h"
#include "components/viz/service/display/overlay_candidate.h"
#include "components/viz/service/display_embedder/image_context_impl.h"
#include "components/viz/service/display_embedder/output_presenter_gl.h"
#include "components/viz/service/display_embedder/skia_output_device.h"
#include "components/viz/service/display_embedder/skia_output_device_buffer_queue.h"
#include "components/viz/service/display_embedder/skia_output_device_gl.h"
#include "components/viz/service/display_embedder/skia_output_device_offscreen.h"
#include "components/viz/service/display_embedder/skia_output_device_webview.h"
#include "components/viz/service/display_embedder/skia_output_surface_dependency.h"
#include "components/viz/service/display_embedder/skia_render_copy_results.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/mailbox_holder.h"
#include "gpu/command_buffer/common/swap_buffers_complete_params.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/external_semaphore.h"
#include "gpu/command_buffer/service/gr_shader_cache.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/scheduler.h"
#include "gpu/command_buffer/service/shared_image/shared_image_representation.h"
#include "gpu/command_buffer/service/skia_utils.h"
#include "gpu/config/gpu_preferences.h"
#include "gpu/ipc/common/gpu_client_ids.h"
#include "gpu/ipc/common/gpu_peak_memory.h"
#include "gpu/ipc/common/gpu_surface_lookup.h"
#include "gpu/vulkan/buildflags.h"
#include "skia/buildflags.h"
#include "skia/ext/legacy_display_globals.h"
#include "skia/ext/rgba_to_yuva.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
#include "third_party/libyuv/include/libyuv/planar_functions.h"
#include "third_party/skia/include/core/SkBitmap.h"
#include "third_party/skia/include/core/SkBlendMode.h"
#include "third_party/skia/include/core/SkCanvas.h"
#include "third_party/skia/include/core/SkColor.h"
#include "third_party/skia/include/core/SkColorSpace.h"
#include "third_party/skia/include/core/SkDeferredDisplayList.h"
#include "third_party/skia/include/core/SkImageInfo.h"
#include "third_party/skia/include/core/SkPixelRef.h"
#include "third_party/skia/include/core/SkSamplingOptions.h"
#include "third_party/skia/include/core/SkSize.h"
#include "third_party/skia/include/core/SkYUVAInfo.h"
#include "third_party/skia/include/gpu/GrTypes.h"
#include "ui/gfx/color_space.h"
#include "ui/gfx/geometry/rect_conversions.h"
#include "ui/gfx/geometry/skia_conversions.h"
#include "ui/gfx/gpu_fence_handle.h"
#include "ui/gl/gl_fence.h"
#include "ui/gl/gl_surface.h"

#if BUILDFLAG(IS_WIN)
#include "components/viz/service/display/dc_layer_overlay.h"
#endif

#if BUILDFLAG(ENABLE_VULKAN)
#include "components/viz/service/display_embedder/skia_output_device_vulkan.h"
#include "gpu/vulkan/vulkan_device_queue.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
#include "gpu/vulkan/vulkan_implementation.h"
#include "gpu/vulkan/vulkan_util.h"
#if BUILDFLAG(IS_ANDROID)
#include "components/viz/service/display_embedder/skia_output_device_vulkan_secondary_cb.h"
#endif
#endif

#if defined(USE_OZONE)
#include "ui/ozone/buildflags.h"
#include "ui/ozone/public/ozone_platform.h"
#include "ui/ozone/public/platform_window_surface.h"
#include "ui/ozone/public/surface_factory_ozone.h"
#if BUILDFLAG(OZONE_PLATFORM_X11)
#define USE_OZONE_PLATFORM_X11
#endif
#endif

#if (BUILDFLAG(ENABLE_VULKAN) || BUILDFLAG(SKIA_USE_DAWN)) && \
    defined(USE_OZONE_PLATFORM_X11)
#include "components/viz/service/display_embedder/skia_output_device_x11.h"
#endif

#if BUILDFLAG(SKIA_USE_DAWN)
#include "components/viz/common/gpu/dawn_context_provider.h"
#if BUILDFLAG(IS_WIN)
#include "components/viz/service/display_embedder/skia_output_device_dawn.h"
#endif
#endif

#if BUILDFLAG(IS_FUCHSIA)
#include "components/viz/service/display_embedder/output_presenter_fuchsia.h"
#endif

namespace viz {
namespace {

template <typename... Args>
void PostAsyncTaskRepeatedly(
    base::WeakPtr<SkiaOutputSurfaceImplOnGpu> impl_on_gpu,
    const base::RepeatingCallback<void(Args...)>& callback,
    Args... args) {
  // Callbacks generated by this function may be executed asynchronously
  // (e.g. by presentation feedback) after |impl_on_gpu| has been destroyed.
  if (impl_on_gpu)
    impl_on_gpu->PostTaskToClientThread(base::BindOnce(callback, args...));
}

template <typename... Args>
base::RepeatingCallback<void(Args...)> CreateSafeRepeatingCallback(
    base::WeakPtr<SkiaOutputSurfaceImplOnGpu> impl_on_gpu,
    const base::RepeatingCallback<void(Args...)>& callback) {
  return base::BindRepeating(&PostAsyncTaskRepeatedly<Args...>, impl_on_gpu,
                             callback);
}

void FailedSkiaFlush(base::StringPiece msg) {
  static auto* kCrashKey = base::debug::AllocateCrashKeyString(
      "sk_flush_failed", base::debug::CrashKeySize::Size64);
  base::debug::SetCrashKeyString(kCrashKey, msg);
  LOG(ERROR) << msg;
}

#if BUILDFLAG(ENABLE_VULKAN)
// Returns whether SkiaOutputDeviceX11 can be instantiated on this platform.
bool MayFallBackToSkiaOutputDeviceX11() {
#if defined(USE_OZONE)
  return ui::OzonePlatform::GetInstance()
      ->GetPlatformProperties()
      .skia_can_fall_back_to_x11;
#else
  return false;
#endif  // defined(USE_OZONE)
}
#endif  // BUILDFLAG(ENABLE_VULKAN)

}  // namespace

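// Helper that brackets access to the promise images referenced by a frame:
// BeginAccess() gathers the wait/signal semaphores for each ImageContextImpl,
// and EndAccess() releases the accumulated contexts once the frame has been
// flushed.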
SkiaOutputSurfaceImplOnGpu::PromiseImageAccessHelper::PromiseImageAccessHelper(
    SkiaOutputSurfaceImplOnGpu* impl_on_gpu)
    : impl_on_gpu_(impl_on_gpu) {}

SkiaOutputSurfaceImplOnGpu::PromiseImageAccessHelper::
    ~PromiseImageAccessHelper() {
  DCHECK(image_contexts_.empty() || impl_on_gpu_->was_context_lost());
}

void SkiaOutputSurfaceImplOnGpu::PromiseImageAccessHelper::BeginAccess(
    std::vector<ImageContextImpl*> image_contexts,
    std::vector<GrBackendSemaphore>* begin_semaphores,
    std::vector<GrBackendSemaphore>* end_semaphores) {
  // GL doesn't need semaphores.
  if (!impl_on_gpu_->context_state_->GrContextIsGL()) {
    DCHECK(begin_semaphores);
    DCHECK(end_semaphores);
    begin_semaphores->reserve(image_contexts.size());
    // We may need one more space for the swap buffer semaphore.
    end_semaphores->reserve(image_contexts.size() + 1);
  }
  image_contexts_.reserve(image_contexts.size() + image_contexts_.size());
  image_contexts_.insert(image_contexts.begin(), image_contexts.end());
  impl_on_gpu_->BeginAccessImages(std::move(image_contexts), begin_semaphores,
                                  end_semaphores);
}

void SkiaOutputSurfaceImplOnGpu::PromiseImageAccessHelper::EndAccess() {
  impl_on_gpu_->EndAccessImages(image_contexts_);
  image_contexts_.clear();
}

namespace {

scoped_refptr<gpu::SyncPointClientState> CreateSyncPointClientState(
    SkiaOutputSurfaceDependency* deps,
    gpu::CommandBufferId command_buffer_id,
    gpu::SequenceId sequence_id) {
  return deps->GetSyncPointManager()->CreateSyncPointClientState(
      gpu::CommandBufferNamespace::VIZ_SKIA_OUTPUT_SURFACE, command_buffer_id,
      sequence_id);
}

std::unique_ptr<gpu::SharedImageFactory> CreateSharedImageFactory(
    SkiaOutputSurfaceDependency* deps,
    gpu::MemoryTracker* memory_tracker) {
  return std::make_unique<gpu::SharedImageFactory>(
      deps->GetGpuPreferences(), deps->GetGpuDriverBugWorkarounds(),
      deps->GetGpuFeatureInfo(), deps->GetSharedContextState().get(),
      deps->GetMailboxManager(), deps->GetSharedImageManager(),
      deps->GetGpuImageFactory(), memory_tracker,
      /*is_for_display_compositor=*/true);
}

std::unique_ptr<gpu::SharedImageRepresentationFactory>
CreateSharedImageRepresentationFactory(SkiaOutputSurfaceDependency* deps,
                                       gpu::MemoryTracker* memory_tracker) {
  return std::make_unique<gpu::SharedImageRepresentationFactory>(
      deps->GetSharedImageManager(), memory_tracker);
}

}  // namespace

SkiaOutputSurfaceImplOnGpu::ReleaseCurrent::ReleaseCurrent(
    scoped_refptr<gl::GLSurface> gl_surface,
    scoped_refptr<gpu::SharedContextState> context_state)
    : gl_surface_(gl_surface), context_state_(context_state) {}

SkiaOutputSurfaceImplOnGpu::ReleaseCurrent::~ReleaseCurrent() {
  if (context_state_ && gl_surface_)
    context_state_->ReleaseCurrent(gl_surface_.get());
}

class SkiaOutputSurfaceImplOnGpu::DisplayContext : public gpu::DisplayContext {
 public:
  DisplayContext(SkiaOutputSurfaceDependency* deps,
                 SkiaOutputSurfaceImplOnGpu* owner)
      : dependency_(deps), owner_(owner) {
    dependency_->RegisterDisplayContext(this);
  }
  ~DisplayContext() override { dependency_->UnregisterDisplayContext(this); }

  DisplayContext(const DisplayContext&) = delete;
  DisplayContext& operator=(const DisplayContext&) = delete;

  // gpu::DisplayContext implementation
  void MarkContextLost() override {
    owner_->MarkContextLost(CONTEXT_LOST_UNKNOWN);
  }

 private:
  const raw_ptr<SkiaOutputSurfaceDependency> dependency_;
  const raw_ptr<SkiaOutputSurfaceImplOnGpu> owner_;
};

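// Factory entry point. Returns nullptr if no SharedContextState is available
// or if a GL context cannot be made current; both are prerequisites for the
// constructor and Initialize() below.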
// static
std::unique_ptr<SkiaOutputSurfaceImplOnGpu> SkiaOutputSurfaceImplOnGpu::Create(
    SkiaOutputSurfaceDependency* deps,
    const RendererSettings& renderer_settings,
    const gpu::SequenceId sequence_id,
    gpu::DisplayCompositorMemoryAndTaskControllerOnGpu* shared_gpu_deps,
    DidSwapBufferCompleteCallback did_swap_buffer_complete_callback,
    BufferPresentedCallback buffer_presented_callback,
    ContextLostCallback context_lost_callback,
    ScheduleGpuTaskCallback schedule_gpu_task,
    GpuVSyncCallback gpu_vsync_callback) {
  TRACE_EVENT0("viz", "SkiaOutputSurfaceImplOnGpu::Create");

  auto context_state = deps->GetSharedContextState();
  if (!context_state)
    return nullptr;

  // Even with Vulkan/Dawn compositing, the SharedImageFactory constructor
  // always initializes a GL-backed SharedImage factory to fall back on.
  // Creating the GLTextureImageBackingFactory invokes GL API calls, so
  // we need to ensure there is a current GL context.
  if (!context_state->MakeCurrent(nullptr, true /* need_gl */)) {
    LOG(ERROR) << "Failed to make current during initialization.";
    return nullptr;
  }
  context_state->set_need_context_state_reset(true);

  auto impl_on_gpu = std::make_unique<SkiaOutputSurfaceImplOnGpu>(
      base::PassKey<SkiaOutputSurfaceImplOnGpu>(), deps,
      context_state->feature_info(), renderer_settings, sequence_id,
      shared_gpu_deps, std::move(did_swap_buffer_complete_callback),
      std::move(buffer_presented_callback), std::move(context_lost_callback),
      std::move(schedule_gpu_task), std::move(gpu_vsync_callback));
  if (!impl_on_gpu->Initialize())
    return nullptr;

  return impl_on_gpu;
}

SkiaOutputSurfaceImplOnGpu::SkiaOutputSurfaceImplOnGpu(
    base::PassKey<SkiaOutputSurfaceImplOnGpu> /* pass_key */,
    SkiaOutputSurfaceDependency* deps,
    scoped_refptr<gpu::gles2::FeatureInfo> feature_info,
    const RendererSettings& renderer_settings,
    const gpu::SequenceId sequence_id,
    gpu::DisplayCompositorMemoryAndTaskControllerOnGpu* shared_gpu_deps,
    DidSwapBufferCompleteCallback did_swap_buffer_complete_callback,
    BufferPresentedCallback buffer_presented_callback,
    ContextLostCallback context_lost_callback,
    ScheduleGpuTaskCallback schedule_gpu_task,
    GpuVSyncCallback gpu_vsync_callback)
    : dependency_(std::move(deps)),
      shared_gpu_deps_(shared_gpu_deps),
      feature_info_(std::move(feature_info)),
      sync_point_client_state_(
          CreateSyncPointClientState(dependency_,
                                     shared_gpu_deps_->command_buffer_id(),
                                     sequence_id)),
      shared_image_factory_(
          CreateSharedImageFactory(dependency_,
                                   shared_gpu_deps_->memory_tracker())),
      shared_image_representation_factory_(
          CreateSharedImageRepresentationFactory(
              dependency_,
              shared_gpu_deps_->memory_tracker())),
      vulkan_context_provider_(dependency_->GetVulkanContextProvider()),
      dawn_context_provider_(dependency_->GetDawnContextProvider()),
      renderer_settings_(renderer_settings),
      did_swap_buffer_complete_callback_(
          std::move(did_swap_buffer_complete_callback)),
      context_lost_callback_(std::move(context_lost_callback)),
      schedule_gpu_task_(std::move(schedule_gpu_task)),
      gpu_vsync_callback_(std::move(gpu_vsync_callback)),
      gpu_preferences_(dependency_->GetGpuPreferences()),
      display_context_(std::make_unique<DisplayContext>(deps, this)),
      async_read_result_lock_(base::MakeRefCounted<AsyncReadResultLock>()) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  weak_ptr_ = weak_ptr_factory_.GetWeakPtr();
  buffer_presented_callback_ = CreateSafeRepeatingCallback(
      weak_ptr_, std::move(buffer_presented_callback));
}

void SkiaOutputSurfaceImplOnGpu::ReleaseAsyncReadResultHelpers() {
  base::AutoLock auto_lock(async_read_result_lock_->lock());
  for (auto* helper : async_read_result_helpers_)
    helper->reset();
  async_read_result_helpers_.clear();
}

SkiaOutputSurfaceImplOnGpu::~SkiaOutputSurfaceImplOnGpu() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  // We need to have context current or lost during the destruction.
  bool has_context = false;
  if (context_state_) {
    context_state_->RemoveContextLostObserver(this);
    has_context = MakeCurrent(/*need_framebuffer=*/false);
    if (has_context) {
      release_current_last_.emplace(gl_surface_, context_state_);
    }
  }
  DCHECK(copy_output_images_.empty() || context_state_)
      << "We must have a valid context if copy requests were serviced";
  copy_output_images_.clear();

  // |output_device_| may still need |shared_image_factory_|, so release it
  // first.
  output_device_.reset();

  // Since SharedImageFactory also has a reference to ImplOnGpu's member
  // SharedContextState, we need to explicitly invoke the factory's destructor
  // before deleting ImplOnGpu's other member variables.
  shared_image_factory_.reset();
  if (has_context) {
    absl::optional<gpu::raster::GrShaderCache::ScopedCacheUse> cache_use;
    if (dependency_->GetGrShaderCache()) {
      cache_use.emplace(dependency_->GetGrShaderCache(),
                        gpu::kDisplayCompositorClientId);
    }
    // This ensures any outstanding callbacks for promise images are
    // performed.
    gr_context()->flushAndSubmit();
  }
  sync_point_client_state_->Destroy();

  // Release all ongoing AsyncReadResults.
  ReleaseAsyncReadResultHelpers();
}

void SkiaOutputSurfaceImplOnGpu::Reshape(
    const SkSurfaceCharacterization& characterization,
    const gfx::ColorSpace& color_space,
    float device_scale_factor,
    gfx::OverlayTransform transform) {
  TRACE_EVENT0("viz", "SkiaOutputSurfaceImplOnGpu::Reshape");
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  DCHECK(gr_context());

  if (context_is_lost_)
    return;

  size_ = gfx::SkISizeToSize(characterization.dimensions());
  if (!output_device_->Reshape(characterization, color_space,
                               device_scale_factor, transform)) {
    MarkContextLost(CONTEXT_LOST_RESHAPE_FAILED);
  }
}

void SkiaOutputSurfaceImplOnGpu::DrawOverdraw(
    sk_sp<SkDeferredDisplayList> overdraw_ddl,
    SkCanvas& canvas) {
  DCHECK(overdraw_ddl);

  sk_sp<SkSurface> overdraw_surface = SkSurface::MakeRenderTarget(
      gr_context(), overdraw_ddl->characterization(), SkBudgeted::kNo);
  overdraw_surface->draw(overdraw_ddl);
  destroy_after_swap_.push_back(std::move(overdraw_ddl));

  SkPaint paint;
  sk_sp<SkImage> overdraw_image = overdraw_surface->makeImageSnapshot();

  paint.setColorFilter(SkiaHelper::MakeOverdrawColorFilter());
  // TODO(xing.xu): move below to the thread where skia record happens.
  canvas.drawImage(overdraw_image.get(), 0, 0, SkSamplingOptions(), &paint);
}

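// Replays the DDL recorded for the current frame into the output device.
// Rough flow: begin a scoped paint, wait on the promise images'
// begin-semaphores, draw the DDL (plus the optional overdraw DDL), then flush
// with the accumulated end-semaphores and hand back any release fence.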
void SkiaOutputSurfaceImplOnGpu::FinishPaintCurrentFrame(
    sk_sp<SkDeferredDisplayList> ddl,
    sk_sp<SkDeferredDisplayList> overdraw_ddl,
    std::vector<ImageContextImpl*> image_contexts,
    std::vector<gpu::SyncToken> sync_tokens,
    base::OnceClosure on_finished,
    base::OnceCallback<void(gfx::GpuFenceHandle)> return_release_fence_cb,
    absl::optional<gfx::Rect> draw_rectangle) {
  TRACE_EVENT0("viz", "SkiaOutputSurfaceImplOnGpu::FinishPaintCurrentFrame");
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  DCHECK(!scoped_output_device_paint_);

  if (context_is_lost_)
    return;

  if (!ddl) {
    MarkContextLost(CONTEXT_LOST_UNKNOWN);
    return;
  }

  if (draw_rectangle) {
    if (!output_device_->SetDrawRectangle(*draw_rectangle)) {
      MarkContextLost(
          ContextLostReason::CONTEXT_LOST_SET_DRAW_RECTANGLE_FAILED);
      return;
    }
  }

  // We do not reset scoped_output_device_paint_ after drawing the ddl until
  // SwapBuffers() is called, because we may need access to output_sk_surface()
  // for CopyOutput().
  scoped_output_device_paint_ = output_device_->BeginScopedPaint();
  if (!scoped_output_device_paint_) {
    MarkContextLost(ContextLostReason::CONTEXT_LOST_BEGIN_PAINT_FAILED);
    return;
  }

  dependency_->ScheduleGrContextCleanup();

  {
    absl::optional<gpu::raster::GrShaderCache::ScopedCacheUse> cache_use;
    if (dependency_->GetGrShaderCache()) {
      cache_use.emplace(dependency_->GetGrShaderCache(),
                        gpu::kDisplayCompositorClientId);
    }

    std::vector<GrBackendSemaphore> begin_semaphores;
    std::vector<GrBackendSemaphore> end_semaphores;

    promise_image_access_helper_.BeginAccess(
        std::move(image_contexts), &begin_semaphores, &end_semaphores);
    if (!begin_semaphores.empty()) {
      auto result = scoped_output_device_paint_->Wait(
          begin_semaphores.size(), begin_semaphores.data(),
          /*delete_semaphores_after_wait=*/false);
      DCHECK(result);
    }

    // Draw will only fail if the SkSurface and SkDDL are incompatible.
    bool draw_success = scoped_output_device_paint_->Draw(ddl);
#if defined(USE_OZONE)
    if (!draw_success)
      DLOG(ERROR) << "output_sk_surface()->draw() failed.";
#else
    DCHECK(draw_success);
#endif  // defined(USE_OZONE)

    destroy_after_swap_.emplace_back(std::move(ddl));

    if (overdraw_ddl) {
      DrawOverdraw(std::move(overdraw_ddl),
                   *scoped_output_device_paint_->GetCanvas());
    }

    auto end_paint_semaphores =
        scoped_output_device_paint_->TakeEndPaintSemaphores();
    end_semaphores.insert(end_semaphores.end(), end_paint_semaphores.begin(),
                          end_paint_semaphores.end());

#if BUILDFLAG(ENABLE_VULKAN)
    // Semaphores for release fences for vulkan should be created before flush.
    if (!return_release_fence_cb.is_null() && is_using_vulkan()) {
      const bool result = CreateAndStoreExternalSemaphoreVulkan(end_semaphores);
      // A release fence will be created on submit as some platforms may use
      // VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT handle types for their
      // external semaphore. That handle type has COPY transference. Vulkan spec
      // says that semaphore has to be signaled, or have an associated semaphore
      // signal operation pending execution. Thus, delay importing the handle
      // and creating the fence until commands are submitted.
      pending_release_fence_cbs_.emplace_back(
          result ? end_semaphores.back() : GrBackendSemaphore(),
          std::move(return_release_fence_cb));
    }
#endif

    const bool end_semaphores_empty = end_semaphores.empty();

    auto result = scoped_output_device_paint_->Flush(vulkan_context_provider_,
                                                     std::move(end_semaphores),
                                                     std::move(on_finished));

    if (result != GrSemaphoresSubmitted::kYes &&
        !(begin_semaphores.empty() && end_semaphores_empty)) {
      if (!return_release_fence_cb.is_null()) {
        PostTaskToClientThread(base::BindOnce(
            std::move(return_release_fence_cb), gfx::GpuFenceHandle()));
      }
      // TODO(penghuang): handle vulkan device lost.
      FailedSkiaFlush("output_sk_surface()->flush() failed.");
      return;
    }

    gfx::GpuFenceHandle release_fence;
    if (!return_release_fence_cb.is_null() && is_using_gl()) {
      DCHECK(release_fence.is_null());
      release_fence = CreateReleaseFenceForGL();
    }

    if (!return_release_fence_cb.is_null() && is_using_dawn())
      NOTIMPLEMENTED() << "Release fences with dawn are not supported.";

    if (!return_release_fence_cb.is_null()) {
      // Returning fences for Vulkan is delayed. See the comment above.
      DCHECK(!is_using_vulkan());
      PostTaskToClientThread(base::BindOnce(std::move(return_release_fence_cb),
                                            std::move(release_fence)));
    }
  }
}

void SkiaOutputSurfaceImplOnGpu::ScheduleOutputSurfaceAsOverlay(
    const OverlayProcessorInterface::OutputSurfaceOverlayPlane&
        output_surface_plane) {
  DCHECK(!output_surface_plane_);
  output_surface_plane_ = output_surface_plane;
}

void SkiaOutputSurfaceImplOnGpu::SwapBuffers(OutputSurfaceFrame frame) {
  TRACE_EVENT0("viz", "SkiaOutputSurfaceImplOnGpu::SwapBuffers");
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  SwapBuffersInternal(std::move(frame));
}

void SkiaOutputSurfaceImplOnGpu::EnsureMinNumberOfBuffers(int n) {
  if (!output_device_->EnsureMinNumberOfBuffers(n)) {
    MarkContextLost(CONTEXT_LOST_ALLOCATE_FRAME_BUFFERS_FAILED);
  }
}

void SkiaOutputSurfaceImplOnGpu::SetDependenciesResolvedTimings(
    base::TimeTicks task_ready) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  output_device_->SetDependencyTimings(task_ready);
}

void SkiaOutputSurfaceImplOnGpu::SetDrawTimings(base::TimeTicks task_posted) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  output_device_->SetDrawTimings(task_posted, base::TimeTicks::Now());
}

void SkiaOutputSurfaceImplOnGpu::SwapBuffersSkipped() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  SwapBuffersInternal(absl::nullopt);
}

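// Like FinishPaintCurrentFrame(), but replays the DDL into the render pass
// backing identified by |mailbox| rather than into the output device.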
void SkiaOutputSurfaceImplOnGpu::FinishPaintRenderPass(
    const gpu::Mailbox& mailbox,
    sk_sp<SkDeferredDisplayList> ddl,
    sk_sp<SkDeferredDisplayList> overdraw_ddl,
    std::vector<ImageContextImpl*> image_contexts,
    std::vector<gpu::SyncToken> sync_tokens,
    base::OnceClosure on_finished,
    base::OnceCallback<void(gfx::GpuFenceHandle)> return_release_fence_cb) {
  TRACE_EVENT0("viz", "SkiaOutputSurfaceImplOnGpu::FinishPaintRenderPass");
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  DCHECK(ddl);

  if (context_is_lost_)
    return;

  if (!ddl) {
    MarkContextLost(CONTEXT_LOST_UNKNOWN);
    return;
  }

  auto backing_representation =
      shared_image_representation_factory_->ProduceSkia(mailbox,
                                                        context_state_.get());
  DCHECK(backing_representation);

  std::vector<GrBackendSemaphore> begin_semaphores;
  std::vector<GrBackendSemaphore> end_semaphores;

  const auto& characterization = ddl->characterization();
  auto scoped_access = backing_representation->BeginScopedWriteAccess(
      characterization.sampleCount(), characterization.surfaceProps(),
      &begin_semaphores, &end_semaphores,
      gpu::SharedImageRepresentation::AllowUnclearedAccess::kYes);
  if (!scoped_access) {
    MarkContextLost(CONTEXT_LOST_UNKNOWN);
    return;
  }

  SkSurface* surface = scoped_access->surface();
  DCHECK(surface);

  {
    absl::optional<gpu::raster::GrShaderCache::ScopedCacheUse> cache_use;
    if (dependency_->GetGrShaderCache()) {
      cache_use.emplace(dependency_->GetGrShaderCache(),
                        gpu::kDisplayCompositorClientId);
    }
    promise_image_access_helper_.BeginAccess(
        std::move(image_contexts), &begin_semaphores, &end_semaphores);
    if (!begin_semaphores.empty()) {
      auto result =
          surface->wait(begin_semaphores.size(), begin_semaphores.data(),
                        /*deleteSemaphoresAfterWait=*/false);
      DCHECK(result);
    }
    surface->draw(ddl);
    backing_representation->SetCleared();
    destroy_after_swap_.emplace_back(std::move(ddl));

    if (overdraw_ddl) {
      DrawOverdraw(std::move(overdraw_ddl), *surface->getCanvas());
    }

#if BUILDFLAG(ENABLE_VULKAN)
    // Semaphores for release fences for vulkan should be created before flush.
    if (!return_release_fence_cb.is_null() && is_using_vulkan()) {
      const bool result = CreateAndStoreExternalSemaphoreVulkan(end_semaphores);
      // A release fence will be created on submit as some platforms may use
      // VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT handle types for their
      // external semaphore. That handle type has COPY transference. Vulkan spec
      // says that semaphore has to be signaled, or have an associated semaphore
      // signal operation pending execution. Thus, delay importing the handle
      // and creating the fence until commands are submitted.
      pending_release_fence_cbs_.emplace_back(
          result ? end_semaphores.back() : GrBackendSemaphore(),
          std::move(return_release_fence_cb));
    }
#endif

    GrFlushInfo flush_info = {
        .fNumSemaphores = end_semaphores.size(),
        .fSignalSemaphores = end_semaphores.data(),
    };
    gpu::AddVulkanCleanupTaskForSkiaFlush(vulkan_context_provider_,
                                          &flush_info);
    if (on_finished)
      gpu::AddCleanupTaskForSkiaFlush(std::move(on_finished), &flush_info);

    auto result = surface->flush(flush_info);
    if (result != GrSemaphoresSubmitted::kYes &&
        !(begin_semaphores.empty() && end_semaphores.empty())) {
      if (!return_release_fence_cb.is_null()) {
        PostTaskToClientThread(base::BindOnce(
            std::move(return_release_fence_cb), gfx::GpuFenceHandle()));
      }
      // TODO(penghuang): handle vulkan device lost.
      FailedSkiaFlush("offscreen.surface()->flush() failed.");
      return;
    }

    // If GL is used, create the release fence after flush.
    gfx::GpuFenceHandle release_fence;
    if (!return_release_fence_cb.is_null() && is_using_gl()) {
      DCHECK(release_fence.is_null());
      release_fence = CreateReleaseFenceForGL();
    }

    if (!return_release_fence_cb.is_null() && is_using_dawn())
      NOTIMPLEMENTED() << "Release fences with dawn are not supported.";

    if (!return_release_fence_cb.is_null()) {
      // Returning fences for Vulkan is delayed. See the comment above.
      DCHECK(!is_using_vulkan());
      PostTaskToClientThread(base::BindOnce(std::move(return_release_fence_cb),
                                            std::move(release_fence)));
    }

    bool sync_cpu =
        gpu::ShouldVulkanSyncCpuForSkiaSubmit(vulkan_context_provider_);
    if (sync_cpu) {
      gr_context()->submit(true);
    }
  }
}

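// Creates a transient shared image and returns a Skia representation of it.
// The factory's own reference is dropped via DestroySharedImage() before
// returning, so the backing stays alive only as long as the representation.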
std::unique_ptr<gpu::SkiaImageRepresentation>
SkiaOutputSurfaceImplOnGpu::CreateSharedImageRepresentationSkia(
    ResourceFormat resource_format,
    const gfx::Size& size,
    const gfx::ColorSpace& color_space) {
  constexpr uint32_t kUsage = gpu::SHARED_IMAGE_USAGE_GLES2 |
                              gpu::SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT |
                              gpu::SHARED_IMAGE_USAGE_RASTER |
                              gpu::SHARED_IMAGE_USAGE_DISPLAY;

  gpu::Mailbox mailbox = gpu::Mailbox::GenerateForSharedImage();
  bool result = shared_image_factory_->CreateSharedImage(
      mailbox, resource_format, size, color_space, kBottomLeft_GrSurfaceOrigin,
      kUnpremul_SkAlphaType, gpu::kNullSurfaceHandle, kUsage);
  if (!result) {
    DLOG(ERROR) << "Failed to create shared image.";
    return nullptr;
  }

  auto representation = dependency_->GetSharedImageManager()->ProduceSkia(
      mailbox, context_state_->memory_type_tracker(), context_state_);
  shared_image_factory_->DestroySharedImage(mailbox);

  return representation;
}

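// Readback into system memory. Skia's asyncRescaleAndReadPixels() may invoke
// the completion callback synchronously, which is why the pending-readback
// counter is incremented before the call is made.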
void SkiaOutputSurfaceImplOnGpu::CopyOutputRGBAInMemory(
    SkSurface* surface,
    copy_output::RenderPassGeometry geometry,
    const gfx::ColorSpace& color_space,
    const SkIRect& src_rect,
    SkSurface::RescaleMode rescale_mode,
    bool is_downscale_or_identity_in_both_dimensions,
    std::unique_ptr<CopyOutputRequest> request) {
  // If we can't convert |color_space| to a SkColorSpace (e.g. PIECEWISE_HDR),
  // request an sRGB destination color space for the copy result instead.
  gfx::ColorSpace dest_color_space = color_space;
  sk_sp<SkColorSpace> sk_color_space = color_space.ToSkColorSpace();
  if (!sk_color_space) {
    dest_color_space = gfx::ColorSpace::CreateSRGB();
  }
  SkImageInfo dst_info = SkImageInfo::Make(
      geometry.result_selection.width(), geometry.result_selection.height(),
      kN32_SkColorType, kPremul_SkAlphaType, sk_color_space);
  std::unique_ptr<ReadPixelsContext> context =
      std::make_unique<ReadPixelsContext>(std::move(request),
                                          geometry.result_selection,
                                          dest_color_space, weak_ptr_);
  // Skia readback could be synchronous. Increment the counter in case
  // ReadbackCompleted is called immediately.
  num_readbacks_pending_++;
  surface->asyncRescaleAndReadPixels(
      dst_info, src_rect, SkSurface::RescaleGamma::kSrc, rescale_mode,
      &CopyOutputResultSkiaRGBA::OnReadbackDone, context.release());
}

void SkiaOutputSurfaceImplOnGpu::CopyOutputRGBA(
    SkSurface* surface,
    copy_output::RenderPassGeometry geometry,
    const gfx::ColorSpace& color_space,
    const SkIRect& src_rect,
    SkSurface::RescaleMode rescale_mode,
    bool is_downscale_or_identity_in_both_dimensions,
    std::unique_ptr<CopyOutputRequest> request) {
  DCHECK_EQ(request->result_format(), CopyOutputRequest::ResultFormat::RGBA);

  switch (request->result_destination()) {
    case CopyOutputRequest::ResultDestination::kSystemMemory:
      CopyOutputRGBAInMemory(
          surface, geometry, color_space, src_rect, rescale_mode,
          is_downscale_or_identity_in_both_dimensions, std::move(request));
      break;
    case CopyOutputRequest::ResultDestination::kNativeTextures: {
      auto representation = CreateSharedImageRepresentationSkia(
          ResourceFormat::RGBA_8888,
          gfx::Size(geometry.result_bounds.width(),
                    geometry.result_bounds.height()),
          color_space);
      if (!representation) {
        DLOG(ERROR) << "Failed to create shared image.";
        return;
      }

      SkSurfaceProps surface_props{0, kUnknown_SkPixelGeometry};

      std::vector<GrBackendSemaphore> begin_semaphores;
      std::vector<GrBackendSemaphore> end_semaphores;

      auto scoped_write = representation->BeginScopedWriteAccess(
          /*final_msaa_count=*/1, surface_props, &begin_semaphores,
          &end_semaphores,
          gpu::SharedImageRepresentation::AllowUnclearedAccess::kYes);

      absl::optional<SkVector> scaling;
      if (request->is_scaled()) {
        scaling =
            SkVector::Make(static_cast<SkScalar>(request->scale_to().x()) /
                               request->scale_from().x(),
                           static_cast<SkScalar>(request->scale_to().y()) /
                               request->scale_from().y());
      }

      scoped_write->surface()->wait(begin_semaphores.size(),
                                    begin_semaphores.data());

      RenderSurface(surface, src_rect, scaling,
                    is_downscale_or_identity_in_both_dimensions,
                    scoped_write->surface());

      bool should_submit = !end_semaphores.empty();

      if (!FlushSurface(scoped_write->surface(), end_semaphores,
                        scoped_write->TakeEndState())) {
        // TODO(penghuang): handle vulkan device lost.
        FailedSkiaFlush("CopyOutputRGBA dest_surface->flush()");
        return;
      }

      if (should_submit && !gr_context()->submit()) {
        DLOG(ERROR) << "CopyOutputRGBA gr_context->submit() failed";
        return;
      }

      representation->SetCleared();

      // Grab the mailbox before we transfer `representation`'s ownership:
      gpu::Mailbox mailbox = representation->mailbox();

      CopyOutputResult::ReleaseCallbacks release_callbacks;
      release_callbacks.push_back(
          CreateDestroyCopyOutputResourcesOnGpuThreadCallback(
              std::move(representation)));

      request->SendResult(std::make_unique<CopyOutputTextureResult>(
          CopyOutputResult::Format::RGBA, geometry.result_bounds,
          CopyOutputResult::TextureResult(mailbox, gpu::SyncToken(),
                                          color_space),
          std::move(release_callbacks)));
      break;
    }
  }
}

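// Copies |source_selection| from |surface| into |dest_surface|, applying the
// optional scale. Mitchell cubic sampling (B = C = 1/3) is used when
// upscaling; linear filtering with mipmaps when downscaling or at identity.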
void SkiaOutputSurfaceImplOnGpu::RenderSurface(
    SkSurface* surface,
    const SkIRect& source_selection,
    absl::optional<SkVector> scaling,
    bool is_downscale_or_identity_in_both_dimensions,
    SkSurface* dest_surface) {
  SkCanvas* dest_canvas = dest_surface->getCanvas();
  int state_depth = dest_canvas->save();

  if (scaling.has_value()) {
    dest_canvas->scale(scaling->x(), scaling->y());
  }

  dest_canvas->clipRect(SkRect::MakeXYWH(0, 0, source_selection.width(),
                                         source_selection.height()));
  // TODO(b/197353769): Ideally, we should simply use a kSrc blending mode,
  // but for some reason, this triggers some antialiasing code that causes
  // various Vulkan tests to fail. We should investigate this and replace
  // this clear with blend mode.
  if (surface->imageInfo().alphaType() != kOpaque_SkAlphaType) {
    dest_canvas->clear(SK_ColorTRANSPARENT);
  }

  auto sampling =
      is_downscale_or_identity_in_both_dimensions
          ? SkSamplingOptions(SkFilterMode::kLinear, SkMipmapMode::kLinear)
          : SkSamplingOptions({1.0f / 3, 1.0f / 3});
  surface->draw(dest_canvas, -source_selection.x(), -source_selection.y(),
                sampling, /*paint=*/nullptr);

  dest_canvas->restoreToCount(state_depth);
}

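// Flushes |surface|, signaling |end_semaphores| and applying |end_state|.
// Returns false only if Skia reports the semaphores were not submitted while
// there were semaphores to signal.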
bool SkiaOutputSurfaceImplOnGpu::FlushSurface(
    SkSurface* surface,
    std::vector<GrBackendSemaphore>& end_semaphores,
    std::unique_ptr<GrBackendSurfaceMutableState> end_state,
    GrGpuFinishedProc finished_proc,
    GrGpuFinishedContext finished_context) {
  GrFlushInfo flush_info;
  flush_info.fNumSemaphores = end_semaphores.size();
  flush_info.fSignalSemaphores = end_semaphores.data();
  flush_info.fFinishedProc = finished_proc;
  flush_info.fFinishedContext = finished_context;
  gpu::AddVulkanCleanupTaskForSkiaFlush(vulkan_context_provider_, &flush_info);
  GrSemaphoresSubmitted flush_result =
      surface->flush(flush_info, end_state.get());
  return flush_result == GrSemaphoresSubmitted::kYes || end_semaphores.empty();
}

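// PlaneAccessData bundles the per-plane state (mailbox, representation,
// scoped write access, and semaphores) used while producing the two planes of
// an NV12 copy result.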
SkiaOutputSurfaceImplOnGpu::PlaneAccessData::PlaneAccessData() = default;
SkiaOutputSurfaceImplOnGpu::PlaneAccessData::~PlaneAccessData() = default;

bool SkiaOutputSurfaceImplOnGpu::CreateSurfacesForNV12Planes(
    const SkYUVAInfo& yuva_info,
    const gfx::ColorSpace& color_space,
    std::array<PlaneAccessData, CopyOutputResult::kNV12MaxPlanes>&
        plane_access_datas) {
  std::array<SkISize, SkYUVAInfo::kMaxPlanes> plane_dimensions;
  int plane_number = yuva_info.planeDimensions(plane_dimensions.data());

  DCHECK_EQ(CopyOutputResult::kNV12MaxPlanes, static_cast<size_t>(plane_number))
      << "We expect SkYUVAInfo to describe NV12 data, which contains 2 "
         "planes!";

  for (int i = 0; i < plane_number; ++i) {
    PlaneAccessData& plane_data = plane_access_datas[i];
    const SkISize& plane_size = plane_dimensions[i];

    const auto resource_format =
        (i == 0) ? ResourceFormat::RED_8 : ResourceFormat::RG_88;
    auto representation = CreateSharedImageRepresentationSkia(
        resource_format, gfx::SkISizeToSize(plane_size), color_space);
    if (!representation) {
      return false;
    }

    SkSurfaceProps surface_props{0, kUnknown_SkPixelGeometry};

    std::unique_ptr<gpu::SkiaImageRepresentation::ScopedWriteAccess>
        scoped_write = representation->BeginScopedWriteAccess(
            /*final_msaa_count=*/1, surface_props,
            &plane_data.begin_semaphores, &plane_data.end_semaphores,
            gpu::SharedImageRepresentation::AllowUnclearedAccess::kYes);
    SkSurface* dest_surface = scoped_write->surface();
    dest_surface->wait(plane_data.begin_semaphores.size(),
                       plane_data.begin_semaphores.data());

    // Semaphores have already been populated in `plane_data`.
    // Set the remaining fields.
    plane_data.mailbox = representation->mailbox();
    plane_data.representation = std::move(representation);
    plane_data.scoped_write = std::move(scoped_write);
    plane_data.size = plane_size;
  }

  return true;
}

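// Counterpart of CreateSurfacesForNV12Planes() for blit requests: instead of
// allocating new shared images, this opens write access to the
// caller-provided mailboxes carried by |blit_request|.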
bool SkiaOutputSurfaceImplOnGpu::ImportSurfacesForNV12Planes(
    const BlitRequest& blit_request,
    std::array<PlaneAccessData, CopyOutputResult::kNV12MaxPlanes>&
        plane_access_datas) {
  for (size_t i = 0; i < CopyOutputResult::kNV12MaxPlanes; ++i) {
    const gpu::MailboxHolder& mailbox_holder = blit_request.mailbox(i);

    // Should never happen: mailboxes are validated when setting a blit request
    // on a CopyOutputRequest, and we only access `kNV12MaxPlanes` mailboxes.
    DCHECK(!mailbox_holder.mailbox.IsZero());

    PlaneAccessData& plane_data = plane_access_datas[i];

    auto representation = dependency_->GetSharedImageManager()->ProduceSkia(
        mailbox_holder.mailbox, context_state_->memory_type_tracker(),
        context_state_);
    if (!representation) {
      return false;
    }

    SkSurfaceProps surface_props{0, kUnknown_SkPixelGeometry};

    std::unique_ptr<gpu::SkiaImageRepresentation::ScopedWriteAccess>
        scoped_write = representation->BeginScopedWriteAccess(
            /*final_msaa_count=*/1, surface_props,
            &plane_data.begin_semaphores, &plane_data.end_semaphores,
            gpu::SharedImageRepresentation::AllowUnclearedAccess::kYes);
    SkSurface* dest_surface = scoped_write->surface();
    dest_surface->wait(plane_data.begin_semaphores.size(),
                       plane_data.begin_semaphores.data());

    // Semaphores have already been populated in `plane_data`.
    // Set the remaining fields.
    plane_data.size = gfx::SizeToSkISize(representation->size());
    plane_data.mailbox = representation->mailbox();
    plane_data.representation = std::move(representation);
    plane_data.scoped_write = std::move(scoped_write);
  }

  return true;
}

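// Draws the bitmaps attached to |blit_request| over the intermediate result,
// honoring each BlendBitmap's source and destination regions.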
void SkiaOutputSurfaceImplOnGpu::BlendBitmapOverlays(
    SkCanvas* canvas,
    const BlitRequest& blit_request) {
  for (const BlendBitmap& blend_bitmap : blit_request.blend_bitmaps()) {
    SkPaint paint;
    paint.setBlendMode(SkBlendMode::kSrcOver);

    canvas->drawImageRect(blend_bitmap.image(),
                          gfx::RectToSkRect(blend_bitmap.source_region()),
                          gfx::RectToSkRect(blend_bitmap.destination_region()),
                          SkSamplingOptions(SkFilterMode::kLinear), &paint,
                          SkCanvas::kFast_SrcRectConstraint);
  }
}

  881. void SkiaOutputSurfaceImplOnGpu::CopyOutputNV12(
  882. SkSurface* surface,
  883. copy_output::RenderPassGeometry geometry,
  884. const gfx::ColorSpace& color_space,
  885. const SkIRect& src_rect,
  886. SkSurface::RescaleMode rescale_mode,
  887. bool is_downscale_or_identity_in_both_dimensions,
  888. std::unique_ptr<CopyOutputRequest> request) {
  889. DCHECK(!request->has_blit_request() ||
  890. request->result_destination() ==
  891. CopyOutputRequest::ResultDestination::kNativeTextures)
  892. << "Only CopyOutputRequests that hand out native textures support blit "
  893. "requests!";
  894. DCHECK(!request->has_blit_request() || request->has_result_selection())
  895. << "Only CopyOutputRequests that specify result selection support blit "
  896. "requests!";
  897. // Overview:
  898. // 1. Try to create surfaces for NV12 planes (we know the needed size in
  899. // advance). If this fails, send an empty result. For requests that have a
  900. // blit request appended, the surfaces should be backed by caller-provided
  901. // textures.
  902. // 2. Render the desired region into a new SkSurface, taking into account
  903. // desired scaling and clipping.
  904. // 3. If blitting, honor the blend bitmap requests set by blending them onto
  905. // the surface produced in step 2.
  906. // 4. Grab an SkImage and convert it into multiple SkSurfaces created by
  907. // step 1, one for each plane.
  908. // 5. Depending on the result destination of the request, either:
  909. // - pass ownership of the textures to the caller (native textures result)
  910. // - schedule a read-back & expose its results to the caller (system memory
  911. // result)
  912. //
  913. // Note: in case the blit request populates the GMBs, the flow stays the same,
  914. // but we need to ensure that the results are only sent out after the
  915. // GpuMemoryBuffer is safe to map into system memory.
  916. // The size of the destination is passed in via `geometry.result_selection` -
  917. // it already takes into account the rect of the render pass that is being
  918. // copied, as well as area, scaling & result_selection of the `request`.
  919. // This represents the size of the intermediate texture that will be then
  920. // blitted to the destination textures.
  921. const gfx::Size intermediate_dst_size = geometry.result_selection.size();
  922. std::array<PlaneAccessData, CopyOutputResult::kNV12MaxPlanes>
  923. plane_access_datas;
  924. SkYUVAInfo yuva_info;
  925. bool destination_surfaces_ready = false;
  926. if (request->has_blit_request()) {
  927. if (request->result_selection().size() != intermediate_dst_size) {
  928. DLOG(WARNING)
  929. << __func__
  930. << ": result selection is different than render pass output, "
  931. "geometry="
  932. << geometry.ToString() << ", request=" << request->ToString();
  933. // Send empty result, we have a blit request that asks for a different
  934. // size than what we have available - the behavior in this case is
  935. // currently unspecified as we'd have to leave parts of the caller's
  936. // region unpopulated.
  937. return;
  938. }
  939. destination_surfaces_ready = ImportSurfacesForNV12Planes(
  940. request->blit_request(), plane_access_datas);
  941. // The entire destination image size is the same as the size of the luma
  942. // plane of the image that was just imported:
  943. yuva_info = SkYUVAInfo(
  944. plane_access_datas[0].size, SkYUVAInfo::PlaneConfig::kY_UV,
  945. SkYUVAInfo::Subsampling::k420, kRec709_Limited_SkYUVColorSpace);
  946. // Check if the destination will fit in the blit target:
  947. const gfx::Rect blit_destination_rect(
  948. request->blit_request().destination_region_offset(),
  949. intermediate_dst_size);
  950. const gfx::Rect blit_target_image_rect(
  951. gfx::SkISizeToSize(plane_access_datas[0].size));
  952. if (!blit_target_image_rect.Contains(blit_destination_rect)) {
  953. // Send empty result, the blit target image is not large enough to fit the
  954. // results.
  955. return;
  956. }
  957. } else {
  958. yuva_info = SkYUVAInfo(gfx::SizeToSkISize(intermediate_dst_size),
  959. SkYUVAInfo::PlaneConfig::kY_UV,
  960. SkYUVAInfo::Subsampling::k420,
  961. kRec709_Limited_SkYUVColorSpace);
  962. destination_surfaces_ready =
  963. CreateSurfacesForNV12Planes(yuva_info, color_space, plane_access_datas);
  964. }
  965. if (!destination_surfaces_ready) {
  966. DVLOG(1) << "failed to create / import destination surfaces";
  967. // Send empty result.
  968. return;
  969. }
  970. // Create a destination for the scaled & clipped result:
  971. auto intermediate_representation = CreateSharedImageRepresentationSkia(
  972. ResourceFormat::RGBA_8888, intermediate_dst_size, color_space);
  973. if (!intermediate_representation) {
  974. DVLOG(1) << "failed to create shared image representation for the "
  975. "intermediate surface";
  976. // Send empty result.
  977. return;
  978. }
  979. SkSurfaceProps surface_props{0, kUnknown_SkPixelGeometry};
  980. std::vector<GrBackendSemaphore> begin_semaphores;
  981. std::vector<GrBackendSemaphore> end_semaphores;
  982. auto intermediate_scoped_write =
  983. intermediate_representation->BeginScopedWriteAccess(
  984. /*final_msaa_count=*/1, surface_props, &begin_semaphores,
  985. &end_semaphores,
  986. gpu::SharedImageRepresentation::AllowUnclearedAccess::kYes);
  987. absl::optional<SkVector> scaling;
  988. if (request->is_scaled()) {
  989. scaling = SkVector::Make(static_cast<SkScalar>(request->scale_to().x()) /
  990. request->scale_from().x(),
  991. static_cast<SkScalar>(request->scale_to().y()) /
  992. request->scale_from().y());
  }

  intermediate_scoped_write->surface()->wait(begin_semaphores.size(),
                                             begin_semaphores.data());

  RenderSurface(surface, src_rect, scaling,
                is_downscale_or_identity_in_both_dimensions,
                intermediate_scoped_write->surface());

  if (request->has_blit_request()) {
    BlendBitmapOverlays(intermediate_scoped_write->surface()->getCanvas(),
                        request->blit_request());
  }

  auto intermediate_image =
      intermediate_scoped_write->surface()->makeImageSnapshot();
  if (!intermediate_image) {
    DLOG(ERROR) << "failed to retrieve `intermediate_image`.";
    return;
  }

  // `skia::BlitRGBAToYUVA()` requires a buffer with 4 SkSurface* elements;
  // allocate it and populate its first 2 entries with the surfaces obtained
  // from |plane_access_datas|.
  std::array<SkSurface*, SkYUVAInfo::kMaxPlanes> plane_surfaces = {
      plane_access_datas[0].scoped_write->surface(),
      plane_access_datas[1].scoped_write->surface(), nullptr, nullptr};

  // The region to be populated in the caller's textures is derived from the
  // blit request's |destination_region_offset()| and from the
  // CopyOutputRequest's |result_selection()|. If we have a blit request, use
  // it. Otherwise, use an empty rect, which means that the entire image will
  // be used as the target of the blit - this will not result in rescaling,
  // since without a blit request present, the destination image size matches
  // |geometry.result_selection|.
  const SkRect dst_region =
      request->has_blit_request()
          ? gfx::RectToSkRect(
                gfx::Rect(request->blit_request().destination_region_offset(),
                          intermediate_dst_size))
          : SkRect::MakeEmpty();

  // We should clear the destination if the BlitRequest asked to letterbox
  // everything outside of the intended destination region:
  const bool clear_destination =
      request->has_blit_request()
          ? request->blit_request().letterboxing_behavior() ==
                LetterboxingBehavior::kLetterbox
          : false;
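  // With LetterboxingBehavior::kLetterbox, the conversion below is also
  // expected to clear the parts of the destination planes that fall outside
  // of `dst_region`, so that no stale content is left in the caller's buffer.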
  skia::BlitRGBAToYUVA(intermediate_image.get(), plane_surfaces.data(),
                       yuva_info, dst_region, clear_destination);
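  // After the blit, plane_surfaces[0] holds the luma (Y) plane and
  // plane_surfaces[1] holds the interleaved chroma (UV) plane of the
  // converted region.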
  // Collect mailbox holders for the destination textures. They will be needed
  // in case the result is kNativeTextures. This happens here in order to
  // simplify the code in case we are populating the GpuMemoryBuffer-backed
  // textures.
  std::array<gpu::MailboxHolder, CopyOutputResult::kMaxPlanes>
      plane_mailbox_holders = {
          gpu::MailboxHolder(plane_access_datas[0].mailbox, gpu::SyncToken(),
                             GL_TEXTURE_2D),
          gpu::MailboxHolder(plane_access_datas[1].mailbox, gpu::SyncToken(),
                             GL_TEXTURE_2D),
          gpu::MailboxHolder(),
      };

  // If we are not the ones allocating the textures, they may come from a GMB,
  // in which case we need to delay sending the results until we receive a
  // callback that the GPU work has completed - otherwise, memory-mapping the
  // GMB may not yield the latest version of the contents.
  const bool should_wait_for_gpu_work =
      request->result_destination() ==
          CopyOutputRequest::ResultDestination::kNativeTextures &&
      request->has_blit_request() &&
      request->blit_request().populates_gpu_memory_buffer();

  scoped_refptr<NV12PlanesReadyContext> nv12_planes_ready = nullptr;
  if (should_wait_for_gpu_work) {
    // Prepare a per-CopyOutputRequest context that will be responsible for
    // sending the CopyOutputResult:
    nv12_planes_ready = base::MakeRefCounted<NV12PlanesReadyContext>(
        weak_ptr_, std::move(request), geometry.result_selection,
        plane_mailbox_holders, color_space);
  }

  bool should_submit = false;
  for (size_t i = 0; i < CopyOutputResult::kNV12MaxPlanes; ++i) {
    plane_access_datas[i].representation->SetCleared();
    should_submit |= !plane_access_datas[i].end_semaphores.empty();

    // Prepare a per-plane context that will notify the per-request context
    // that the GPU-side work producing the contents of this plane has
    // completed.
    std::unique_ptr<NV12SinglePlaneReadyContext> nv12_plane_ready =
        should_wait_for_gpu_work
            ? std::make_unique<NV12SinglePlaneReadyContext>(nv12_planes_ready)
            : nullptr;

    if (should_wait_for_gpu_work) {
      // Treat the fact that we're waiting for GPU work to finish the same way
      // as a readback request. This allows us to nudge Skia to fire the
      // callbacks. See `SkiaOutputSurfaceImplOnGpu::CheckReadbackCompletion()`.
      ++num_readbacks_pending_;
    }

    if (!FlushSurface(
            plane_surfaces[i], plane_access_datas[i].end_semaphores,
            plane_access_datas[i].scoped_write->TakeEndState(),
            should_wait_for_gpu_work
                ? &NV12SinglePlaneReadyContext::OnNV12PlaneReady
                : nullptr,
            should_wait_for_gpu_work ? nv12_plane_ready.release() : nullptr)) {
      // TODO(penghuang): handle vulkan device lost.
      FailedSkiaFlush("CopyOutputNV12 plane_surfaces[i]->flush()");
      return;
    }
  }

  should_submit |= !end_semaphores.empty();
  intermediate_representation->SetCleared();
  if (!FlushSurface(intermediate_scoped_write->surface(), end_semaphores,
                    intermediate_scoped_write->TakeEndState())) {
    // TODO(penghuang): handle vulkan device lost.
    FailedSkiaFlush("CopyOutputNV12 dest_surface->flush()");
    return;
  }

  if (should_submit && !gr_context()->submit()) {
    DLOG(ERROR) << "CopyOutputNV12 gr_context->submit() failed";
    return;
  }

  if (should_wait_for_gpu_work) {
    // Flow will continue after the GPU work is done - see
    // `NV12PlanesReadyContext::OnNV12PlaneReady()`, which eventually gets
    // called.
    return;
  }

  // We conditionally move from |request| (if `should_wait_for_gpu_work` is
  // true), so DCHECK that we don't accidentally enter this codepath after the
  // request was moved from.
  DCHECK(request);

  switch (request->result_destination()) {
    case CopyOutputRequest::ResultDestination::kNativeTextures: {
      CopyOutputResult::ReleaseCallbacks release_callbacks;

      if (!request->has_blit_request()) {
        // In blit requests, we are not responsible for releasing the textures
        // (the issuer of the request owns them), so create the callbacks only
        // if we don't have a blit request:
        for (size_t i = 0; i < CopyOutputResult::kNV12MaxPlanes; ++i) {
          release_callbacks.push_back(
              CreateDestroyCopyOutputResourcesOnGpuThreadCallback(
                  std::move(plane_access_datas[i].representation)));
        }
      }

      request->SendResult(std::make_unique<CopyOutputTextureResult>(
          CopyOutputResult::Format::NV12_PLANES, geometry.result_selection,
          CopyOutputResult::TextureResult(plane_mailbox_holders, color_space),
          std::move(release_callbacks)));
      break;
    }
    case CopyOutputRequest::ResultDestination::kSystemMemory: {
      auto nv12_readback = base::MakeRefCounted<NV12PlanesReadbackContext>(
          weak_ptr_, std::move(request), geometry.result_selection);

      // Issue readbacks from the surfaces:
      for (size_t i = 0; i < CopyOutputResult::kNV12MaxPlanes; ++i) {
        SkImageInfo dst_info = SkImageInfo::Make(
            plane_access_datas[i].size,
            (i == 0) ? kAlpha_8_SkColorType : kR8G8_unorm_SkColorType,
            kUnpremul_SkAlphaType);
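        // Plane 0 (Y) reads back as one 8-bit channel per pixel, while
        // plane 1 (UV) is interleaved and reads back as two 8-bit channels
        // per pixel.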
        auto context =
            std::make_unique<NV12PlanePixelReadContext>(nv12_readback, i);

        num_readbacks_pending_++;
        plane_surfaces[i]->asyncRescaleAndReadPixels(
            dst_info, SkIRect::MakeSize(plane_access_datas[i].size),
            SkSurface::RescaleGamma::kSrc,
            SkSurface::RescaleMode::kRepeatedLinear,
            &CopyOutputResultSkiaNV12::OnNV12PlaneReadbackDone,
            context.release());
      }
      break;
    }
  }
}

ReleaseCallback
SkiaOutputSurfaceImplOnGpu::CreateDestroyCopyOutputResourcesOnGpuThreadCallback(
    std::unique_ptr<gpu::SkiaImageRepresentation> representation) {
  copy_output_images_.push_back(std::move(representation));

  auto closure_on_gpu_thread = base::BindOnce(
      &SkiaOutputSurfaceImplOnGpu::DestroyCopyOutputResourcesOnGpuThread,
      weak_ptr_, copy_output_images_.back()->mailbox());

  // The destruction sequence for the textures cached by |copy_output_images_|
  // is as follows:
  // 1) The ReleaseCallback returned here can be invoked on any thread. When
  //    invoked, we post a task to the client thread with sync token
  //    dependencies that must be met before the texture can be released.
  // 2) When this task runs on the Viz thread, it will retain the closure above
  //    until the next draw (for WebView). At the next draw, the Viz thread
  //    synchronously waits to satisfy the sync token dependencies.
  // 3) Once the step above finishes, the closure is dispatched on the GPU
  //    thread (or render thread on WebView).
  ReleaseCallback release_callback = base::BindOnce(
      [](ScheduleGpuTaskCallback schedule_gpu_task, base::OnceClosure callback,
         const gpu::SyncToken& sync_token,
         bool) { schedule_gpu_task.Run(std::move(callback), {sync_token}); },
      schedule_gpu_task_, std::move(closure_on_gpu_thread));

  return base::BindPostTask(dependency_->GetClientTaskRunner(),
                            std::move(release_callback));
}

void SkiaOutputSurfaceImplOnGpu::DestroyCopyOutputResourcesOnGpuThread(
    const gpu::Mailbox& mailbox) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  for (size_t i = 0; i < copy_output_images_.size(); ++i) {
    if (copy_output_images_[i]->mailbox() == mailbox) {
      context_state_->MakeCurrent(nullptr);
      copy_output_images_.erase(copy_output_images_.begin() + i);
      return;
    }
  }
  NOTREACHED() << "The Callback returned by GetDeleteCallback() was called "
               << "more than once.";
}

void SkiaOutputSurfaceImplOnGpu::CopyOutput(
    AggregatedRenderPassId id,
    const copy_output::RenderPassGeometry& geometry,
    const gfx::ColorSpace& color_space,
    std::unique_ptr<CopyOutputRequest> request,
    const gpu::Mailbox& mailbox) {
  TRACE_EVENT0("viz", "SkiaOutputSurfaceImplOnGpu::CopyOutput");
  // TODO(https://crbug.com/898595): Do this on the GPU instead of CPU with
  // Vulkan.
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  if (context_is_lost_)
    return;

  bool from_framebuffer = !id;
  DCHECK(scoped_output_device_paint_ || !from_framebuffer);

  SkSurface* surface;
  std::unique_ptr<gpu::SkiaImageRepresentation> backing_representation;
  std::unique_ptr<gpu::SkiaImageRepresentation::ScopedWriteAccess>
      scoped_access;
  std::vector<GrBackendSemaphore> begin_semaphores;
  std::vector<GrBackendSemaphore> end_semaphores;
  std::unique_ptr<GrBackendSurfaceMutableState> end_state;
  if (from_framebuffer) {
    surface = scoped_output_device_paint_->sk_surface();
  } else {
    backing_representation = shared_image_representation_factory_->ProduceSkia(
        mailbox, context_state_.get());
    DCHECK(backing_representation);

    SkSurfaceProps surface_props{0, kUnknown_SkPixelGeometry};
    // TODO(https://crbug.com/1226672): Use BeginScopedReadAccess instead.
    scoped_access = backing_representation->BeginScopedWriteAccess(
        /*final_msaa_count=*/1, surface_props, &begin_semaphores,
        &end_semaphores,
        gpu::SharedImageRepresentation::AllowUnclearedAccess::kNo);
    surface = scoped_access->surface();
    end_state = scoped_access->TakeEndState();
    if (!begin_semaphores.empty()) {
      auto result =
          surface->wait(begin_semaphores.size(), begin_semaphores.data(),
                        /*deleteSemaphoresAfterWait=*/false);
      DCHECK(result);
    }
  }

  // Reading back from a Vulkan secondary command buffer is not supported.
  if (!surface)
    return;

  // If a platform doesn't support the RGBX_8888 format, we use RGBA_8888
  // instead. In that case, we need to discard the alpha channel (set the
  // alpha values to 0xff while leaving the other channels unchanged).
  bool need_discard_alpha =
      from_framebuffer && (output_device_->is_emulated_rgbx());
  if (need_discard_alpha) {
    absl::optional<gpu::raster::GrShaderCache::ScopedCacheUse> cache_use;
    if (dependency_->GetGrShaderCache()) {
      cache_use.emplace(dependency_->GetGrShaderCache(),
                        gpu::kDisplayCompositorClientId);
    }
    SkPaint paint;
    paint.setColor(SK_ColorBLACK);
    paint.setBlendMode(SkBlendMode::kDstATop);
    surface->getCanvas()->drawPaint(paint);
    surface->flush();
  }

  absl::optional<gpu::raster::GrShaderCache::ScopedCacheUse> cache_use;
  if (dependency_->GetGrShaderCache()) {
    cache_use.emplace(dependency_->GetGrShaderCache(),
                      gpu::kDisplayCompositorClientId);
  }

  // For downscaling, use the GOOD quality setting (appropriate for
  // thumbnailing); and, for upscaling, use the BEST quality.
  const bool is_downscale_or_identity_in_both_dimensions =
      request->scale_to().x() <= request->scale_from().x() &&
      request->scale_to().y() <= request->scale_from().y();
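  // For example, scale_from == (2, 2) with scale_to == (1, 1) is a pure
  // downscale and selects kRepeatedLinear below, while scale_to == (4, 4)
  // would be an upscale and select kRepeatedCubic.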
  const SkSurface::RescaleMode rescale_mode =
      is_downscale_or_identity_in_both_dimensions
          ? SkSurface::RescaleMode::kRepeatedLinear
          : SkSurface::RescaleMode::kRepeatedCubic;

  // Compute |source_selection| as a workaround to support |result_selection|
  // with Skia readback. |result_selection| is a clip rect specified in the
  // destination pixel space. By transforming |result_selection| back to the
  // source pixel space we can compute what rectangle to sample from.
  //
  // This might introduce some rounding error if destination pixel space is
  // scaled up from the source pixel space. When scaling |result_selection|
  // back down it might not be pixel aligned.
  gfx::Rect source_selection = geometry.sampling_bounds;
  if (request->has_result_selection()) {
    gfx::Rect sampling_selection = request->result_selection();
    if (request->is_scaled()) {
      // Invert the scaling.
      sampling_selection = copy_output::ComputeResultRect(
          sampling_selection, request->scale_to(), request->scale_from());
    }
    sampling_selection.Offset(source_selection.OffsetFromOrigin());
    source_selection.Intersect(sampling_selection);
  }
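  // Example: with result_selection == (0,0 100x100) and a 2x upscale
  // (scale_from == (1, 1), scale_to == (2, 2)), the inverted selection is
  // (0,0 50x50) in the source space before being offset into the sampling
  // bounds.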
  SkIRect src_rect =
      SkIRect::MakeXYWH(source_selection.x(), source_selection.y(),
                        source_selection.width(), source_selection.height());

  switch (request->result_format()) {
    case CopyOutputRequest::ResultFormat::I420_PLANES: {
      DCHECK_EQ(geometry.result_selection.width() % 2, 0)
          << "SkSurface::asyncRescaleAndReadPixelsYUV420() requires "
             "destination width to be even!";
      DCHECK_EQ(geometry.result_selection.height() % 2, 0)
          << "SkSurface::asyncRescaleAndReadPixelsYUV420() requires "
             "destination height to be even!";
      std::unique_ptr<ReadPixelsContext> context =
          std::make_unique<ReadPixelsContext>(std::move(request),
                                              geometry.result_selection,
                                              color_space, weak_ptr_);
      // Skia readback could be synchronous. Increment the counter in case
      // ReadbackCompleted is called immediately.
      num_readbacks_pending_++;
      surface->asyncRescaleAndReadPixelsYUV420(
          kRec709_SkYUVColorSpace, SkColorSpace::MakeSRGB(), src_rect,
          {geometry.result_selection.width(),
           geometry.result_selection.height()},
          SkSurface::RescaleGamma::kSrc, rescale_mode,
          &CopyOutputResultSkiaYUV::OnReadbackDone, context.release());
      break;
    }
    case CopyOutputRequest::ResultFormat::NV12_PLANES: {
      CopyOutputNV12(surface, geometry, color_space, src_rect, rescale_mode,
                     is_downscale_or_identity_in_both_dimensions,
                     std::move(request));
      break;
    }
    case CopyOutputRequest::ResultFormat::RGBA: {
      CopyOutputRGBA(surface, geometry, color_space, src_rect, rescale_mode,
                     is_downscale_or_identity_in_both_dimensions,
                     std::move(request));
      break;
    }
  }

  if (!FlushSurface(surface, end_semaphores, std::move(end_state))) {
    // TODO(penghuang): handle vulkan device lost.
    FailedSkiaFlush("surface->flush() failed.");
    return;
  }

  ScheduleCheckReadbackCompletion();
}

void SkiaOutputSurfaceImplOnGpu::BeginAccessImages(
    const std::vector<ImageContextImpl*>& image_contexts,
    std::vector<GrBackendSemaphore>* begin_semaphores,
    std::vector<GrBackendSemaphore>* end_semaphores) {
  TRACE_EVENT0("viz", "SkiaOutputSurfaceImplOnGpu::BeginAccessImages");
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  bool is_gl = gpu_preferences_.gr_context_type == gpu::GrContextType::kGL;
  for (auto* context : image_contexts) {
    // Prepare for accessing render pass.
    context->BeginAccessIfNecessary(
        context_state_.get(), shared_image_representation_factory_.get(),
        dependency_->GetMailboxManager(), begin_semaphores, end_semaphores);
    if (auto end_state = context->TakeAccessEndState())
      image_contexts_with_end_access_state_.emplace(context,
                                                    std::move(end_state));

    // Texture parameters can be modified by concurrent reads, so reset them
    // before compositing from the texture. See https://crbug.com/1092080.
    if (is_gl && context->maybe_concurrent_reads()) {
      auto* promise_texture = context->promise_image_texture();
      if (promise_texture) {
        GrBackendTexture backend_texture = promise_texture->backendTexture();
        backend_texture.glTextureParametersModified();
      }
    }
  }
}

void SkiaOutputSurfaceImplOnGpu::ResetStateOfImages() {
  for (auto& context : image_contexts_with_end_access_state_) {
    if (!gr_context()->setBackendTextureState(
            context.first->promise_image_texture()->backendTexture(),
            *context.second)) {
      DLOG(ERROR) << "setBackendTextureState() failed.";
    }
  }
  image_contexts_with_end_access_state_.clear();
}

void SkiaOutputSurfaceImplOnGpu::EndAccessImages(
    const base::flat_set<ImageContextImpl*>& image_contexts) {
  TRACE_EVENT0("viz", "SkiaOutputSurfaceImplOnGpu::EndAccessImages");
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  DCHECK(image_contexts_with_end_access_state_.empty());
  for (auto* context : image_contexts)
    context->EndAccessIfNecessary();
}

sk_sp<GrContextThreadSafeProxy>
SkiaOutputSurfaceImplOnGpu::GetGrContextThreadSafeProxy() {
  return gr_context() ? gr_context()->threadSafeProxy() : nullptr;
}

void SkiaOutputSurfaceImplOnGpu::ReleaseImageContexts(
    std::vector<std::unique_ptr<ExternalUseClient::ImageContext>>
        image_contexts) {
  DCHECK(!image_contexts.empty());
  // The window could already be destroyed, and MakeCurrent would fail with a
  // destroyed window, so make current without requiring fbo0.
  if (context_is_lost_) {
    for (const auto& context : image_contexts)
      context->OnContextLost();
  }
  image_contexts.clear();
}

void SkiaOutputSurfaceImplOnGpu::ScheduleOverlays(
    SkiaOutputSurface::OverlayList overlays) {
  overlays_ = std::move(overlays);
}

void SkiaOutputSurfaceImplOnGpu::SetEnableDCLayers(bool enable) {
  if (context_is_lost_)
    return;
  output_device_->SetEnableDCLayers(enable);
}

void SkiaOutputSurfaceImplOnGpu::SetGpuVSyncEnabled(bool enabled) {
  output_device_->SetGpuVSyncEnabled(enabled);
}

void SkiaOutputSurfaceImplOnGpu::SetFrameRate(float frame_rate) {
  if (gl_surface_)
    gl_surface_->SetFrameRate(frame_rate);
}

void SkiaOutputSurfaceImplOnGpu::SetCapabilitiesForTesting(
    const OutputSurface::Capabilities& capabilities) {
  // Check that we're using an offscreen surface.
  DCHECK(dependency_->IsOffscreen());
  output_device_ = std::make_unique<SkiaOutputDeviceOffscreen>(
      context_state_, capabilities.output_surface_origin,
      renderer_settings_.requires_alpha_channel,
      shared_gpu_deps_->memory_tracker(), GetDidSwapBuffersCompleteCallback());
}

bool SkiaOutputSurfaceImplOnGpu::Initialize() {
  TRACE_EVENT1("viz", "SkiaOutputSurfaceImplOnGpu::Initialize",
               "is_using_vulkan", is_using_vulkan());
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

#if defined(USE_OZONE)
  gpu::SurfaceHandle surface_handle = dependency_->GetSurfaceHandle();
  if (surface_handle != gpu::kNullSurfaceHandle) {
    window_surface_ = ui::OzonePlatform::GetInstance()
                          ->GetSurfaceFactoryOzone()
                          ->CreatePlatformWindowSurface(surface_handle);
  }
#endif

  context_state_ = dependency_->GetSharedContextState();
  DCHECK(context_state_);
  if (!context_state_->gr_context()) {
    DLOG(ERROR) << "Failed to create GrContext";
    return false;
  }

  if (is_using_vulkan()) {
    if (!InitializeForVulkan())
      return false;
  } else if (is_using_dawn()) {
    if (!InitializeForDawn())
      return false;
  } else {
    if (!InitializeForGL())
      return false;
  }

  max_resource_cache_bytes_ =
      context_state_->gr_context()->getResourceCacheLimit();
  if (context_state_)
    context_state_->AddContextLostObserver(this);

  return true;
}

bool SkiaOutputSurfaceImplOnGpu::InitializeForGL() {
  gl::GLSurfaceFormat format;
  if (PreferRGB565ResourcesForDisplay() &&
      !renderer_settings_.requires_alpha_channel) {
    format.SetRGB565();
  }

  if (dependency_->IsOffscreen()) {
    gl_surface_ = dependency_->CreateGLSurface(nullptr, format);
    if (!gl_surface_)
      return false;

    output_device_ = std::make_unique<SkiaOutputDeviceOffscreen>(
        context_state_, gfx::SurfaceOrigin::kTopLeft,
        renderer_settings_.requires_alpha_channel,
        shared_gpu_deps_->memory_tracker(),
        GetDidSwapBuffersCompleteCallback());
  } else {
    gl_surface_ =
        dependency_->CreateGLSurface(weak_ptr_factory_.GetWeakPtr(), format);
    if (!gl_surface_)
      return false;

    if (MakeCurrent(/*need_framebuffer=*/true)) {
      if (gl_surface_->IsSurfaceless()) {
#if defined(USE_OZONE)
        [[maybe_unused]] bool needs_background_image =
            ui::OzonePlatform::GetInstance()
                ->GetPlatformRuntimeProperties()
                .needs_background_image;
        [[maybe_unused]] bool supports_non_backed_solid_color_images =
            ui::OzonePlatform::GetInstance()
                ->GetPlatformRuntimeProperties()
                .supports_non_backed_solid_color_buffers;
#else   // defined(USE_OZONE)
        [[maybe_unused]] bool needs_background_image = false;
        [[maybe_unused]] bool supports_non_backed_solid_color_images = false;
#endif  // defined(USE_OZONE)

#if !BUILDFLAG(IS_WIN)
        output_device_ = std::make_unique<SkiaOutputDeviceBufferQueue>(
            std::make_unique<OutputPresenterGL>(
                gl_surface_, dependency_, shared_image_factory_.get(),
                shared_image_representation_factory_.get()),
            dependency_, shared_image_representation_factory_.get(),
            shared_gpu_deps_->memory_tracker(),
            GetDidSwapBuffersCompleteCallback(), needs_background_image,
            supports_non_backed_solid_color_images);
#else   // !BUILDFLAG(IS_WIN)
        NOTIMPLEMENTED();
#endif  // !BUILDFLAG(IS_WIN)
      } else {
        if (dependency_->NeedsSupportForExternalStencil()) {
          output_device_ = std::make_unique<SkiaOutputDeviceWebView>(
              context_state_.get(), gl_surface_,
              shared_gpu_deps_->memory_tracker(),
              GetDidSwapBuffersCompleteCallback());
        } else {
          output_device_ = std::make_unique<SkiaOutputDeviceGL>(
              dependency_->GetMailboxManager(),
              shared_image_representation_factory_.get(), context_state_.get(),
              gl_surface_, feature_info_, shared_gpu_deps_->memory_tracker(),
              GetDidSwapBuffersCompleteCallback());
        }
      }
    } else {
      gl_surface_ = nullptr;
      context_state_ = nullptr;
      LOG(ERROR) << "Failed to make current during initialization.";
      return false;
    }
  }
  DCHECK_EQ(gl_surface_->IsOffscreen(), dependency_->IsOffscreen());
  return true;
}

#if BUILDFLAG(ENABLE_VULKAN)
bool SkiaOutputSurfaceImplOnGpu::InitializeForVulkan() {
  if (dependency_->IsOffscreen()) {
    output_device_ = std::make_unique<SkiaOutputDeviceOffscreen>(
        context_state_, gfx::SurfaceOrigin::kBottomLeft,
        renderer_settings_.requires_alpha_channel,
        shared_gpu_deps_->memory_tracker(),
        GetDidSwapBuffersCompleteCallback());
    return true;
  }

#if BUILDFLAG(IS_ANDROID)
  if (vulkan_context_provider_->GetGrSecondaryCBDrawContext()) {
    output_device_ = std::make_unique<SkiaOutputDeviceVulkanSecondaryCB>(
        vulkan_context_provider_, shared_gpu_deps_->memory_tracker(),
        GetDidSwapBuffersCompleteCallback());
    return true;
  }
#endif

#if defined(USE_OZONE)
  [[maybe_unused]] bool needs_background_image =
      ui::OzonePlatform::GetInstance()
          ->GetPlatformRuntimeProperties()
          .needs_background_image;
  [[maybe_unused]] bool supports_non_backed_solid_color_images =
      ui::OzonePlatform::GetInstance()
          ->GetPlatformRuntimeProperties()
          .supports_non_backed_solid_color_buffers;
#else   // defined(USE_OZONE)
  [[maybe_unused]] bool needs_background_image = false;
  [[maybe_unused]] bool supports_non_backed_solid_color_images = false;
#endif  // defined(USE_OZONE)

#if !BUILDFLAG(IS_WIN)
#if BUILDFLAG(IS_FUCHSIA)
  auto output_presenter = OutputPresenterFuchsia::Create(
      window_surface_.get(), dependency_, shared_image_factory_.get(),
      shared_image_representation_factory_.get());
#else
  auto output_presenter =
      OutputPresenterGL::Create(dependency_, shared_image_factory_.get(),
                                shared_image_representation_factory_.get());
  if (output_presenter) {
    // TODO(https://crbug.com/1012401): don't depend on GL.
    gl_surface_ = output_presenter->gl_surface();
  }
#endif
  if (output_presenter) {
    output_device_ = std::make_unique<SkiaOutputDeviceBufferQueue>(
        std::move(output_presenter), dependency_,
        shared_image_representation_factory_.get(),
        shared_gpu_deps_->memory_tracker(), GetDidSwapBuffersCompleteCallback(),
        needs_background_image, supports_non_backed_solid_color_images);
    return true;
  }
#endif  // !BUILDFLAG(IS_WIN)

  std::unique_ptr<SkiaOutputDeviceVulkan> output_device;
  if (!gpu_preferences_.disable_vulkan_surface) {
    output_device = SkiaOutputDeviceVulkan::Create(
        vulkan_context_provider_, dependency_->GetSurfaceHandle(),
        shared_gpu_deps_->memory_tracker(),
        GetDidSwapBuffersCompleteCallback());
  }

  if (MayFallBackToSkiaOutputDeviceX11()) {
#if defined(USE_OZONE_PLATFORM_X11)
    if (output_device) {
      output_device_ = std::move(output_device);
    } else {
      output_device_ = SkiaOutputDeviceX11::Create(
          context_state_, dependency_->GetSurfaceHandle(),
          shared_gpu_deps_->memory_tracker(),
          GetDidSwapBuffersCompleteCallback());
    }
    if (output_device_)
      return true;
#endif  // defined(USE_OZONE_PLATFORM_X11)
  }

  if (!output_device)
    return false;

#if BUILDFLAG(IS_WIN)
  gpu::SurfaceHandle child_surface = output_device->GetChildSurfaceHandle();
  if (child_surface != gpu::kNullSurfaceHandle) {
    DidCreateAcceleratedSurfaceChildWindow(dependency_->GetSurfaceHandle(),
                                           child_surface);
  }
#endif  // BUILDFLAG(IS_WIN)
  output_device_ = std::move(output_device);
  return true;
}
#else   // BUILDFLAG(ENABLE_VULKAN)
bool SkiaOutputSurfaceImplOnGpu::InitializeForVulkan() {
  return false;
}
#endif  // BUILDFLAG(ENABLE_VULKAN)

bool SkiaOutputSurfaceImplOnGpu::InitializeForDawn() {
#if BUILDFLAG(SKIA_USE_DAWN)
  if (dependency_->IsOffscreen()) {
    output_device_ = std::make_unique<SkiaOutputDeviceOffscreen>(
        context_state_, gfx::SurfaceOrigin::kBottomLeft,
        renderer_settings_.requires_alpha_channel,
        shared_gpu_deps_->memory_tracker(),
        GetDidSwapBuffersCompleteCallback());
  } else {
#if defined(USE_OZONE_PLATFORM_X11)
    // TODO(rivr): Set up a Vulkan swapchain so that Linux can also use
    // SkiaOutputDeviceDawn.
    if (MayFallBackToSkiaOutputDeviceX11()) {
      output_device_ = SkiaOutputDeviceX11::Create(
          context_state_, dependency_->GetSurfaceHandle(),
          shared_gpu_deps_->memory_tracker(),
          GetDidSwapBuffersCompleteCallback());
    }
#elif BUILDFLAG(IS_WIN)
    std::unique_ptr<SkiaOutputDeviceDawn> output_device =
        std::make_unique<SkiaOutputDeviceDawn>(
            dawn_context_provider_, dependency_->GetSurfaceHandle(),
            gfx::SurfaceOrigin::kTopLeft, shared_gpu_deps_->memory_tracker(),
            GetDidSwapBuffersCompleteCallback());
    const gpu::SurfaceHandle child_surface_handle =
        output_device->GetChildSurfaceHandle();
    DidCreateAcceleratedSurfaceChildWindow(dependency_->GetSurfaceHandle(),
                                           child_surface_handle);
    output_device_ = std::move(output_device);
#else
    NOTREACHED();
    return false;
#endif
  }
#endif  // BUILDFLAG(SKIA_USE_DAWN)
  return !!output_device_;
}

bool SkiaOutputSurfaceImplOnGpu::MakeCurrent(bool need_framebuffer) {
  // If GL is not being used, or the GLSurface is surfaceless, there is no
  // fbo0 to bind, so we can skip making the GLSurface current for better
  // performance.
  bool need_fbo0 = need_framebuffer && context_state_->GrContextIsGL() &&
                   gl_surface_ && !gl_surface_->IsSurfaceless();

  // need_fbo0 implies need_gl too.
  bool need_gl = need_fbo0;

  // Only make current with |gl_surface_| if the following operations will use
  // fbo0.
  auto* gl_surface = need_fbo0 ? gl_surface_.get() : nullptr;
  if (!context_state_->MakeCurrent(gl_surface, need_gl)) {
    LOG(ERROR) << "Failed to make current.";
    dependency_->DidLoseContext(
        *context_state_->context_lost_reason(),
        GURL("chrome://gpu/SkiaOutputSurfaceImplOnGpu::MakeCurrent"));
    MarkContextLost(GetContextLostReason(
        gpu::error::kLostContext, *context_state_->context_lost_reason()));
    return false;
  }

  // Some GLSurface implementations use OnMakeCurrent() to track the current
  // GLContext. Even if the framebuffer is not needed, we still call
  // OnMakeCurrent() so the GLSurface implementation knows about the current
  // GLContext.
  if (gl_surface_ && !need_fbo0)
    gl_surface_->OnMakeCurrent(context_state_->context());

  context_state_->set_need_context_state_reset(true);
  return true;
}

void SkiaOutputSurfaceImplOnGpu::ReleaseFenceSync(uint64_t sync_fence_release) {
  sync_point_client_state_->ReleaseFenceSync(sync_fence_release);
}

void SkiaOutputSurfaceImplOnGpu::SwapBuffersInternal(
    absl::optional<OutputSurfaceFrame> frame) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  DCHECK(output_device_);

  if (context_is_lost_)
    return;

  if (gl_surface_ && frame) {
    gl_surface_->SetChoreographerVsyncIdForNextFrame(
        frame->choreographer_vsync_id);
    if (frame->delegated_ink_metadata) {
      gl_surface_->SetDelegatedInkTrailStartPoint(
          std::move(frame->delegated_ink_metadata));
    }
  }

  bool sync_cpu =
      gpu::ShouldVulkanSyncCpuForSkiaSubmit(vulkan_context_provider_);

  ResetStateOfImages();
  output_device_->Submit(
      sync_cpu, base::BindOnce(&SkiaOutputSurfaceImplOnGpu::PostSubmit,
                               base::Unretained(this), std::move(frame)));
}

void SkiaOutputSurfaceImplOnGpu::PostSubmit(
    absl::optional<OutputSurfaceFrame> frame) {
  promise_image_access_helper_.EndAccess();
  scoped_output_device_paint_.reset();

#if BUILDFLAG(ENABLE_VULKAN)
  while (!pending_release_fence_cbs_.empty()) {
    auto& item = pending_release_fence_cbs_.front();
    auto release_fence = CreateReleaseFenceForVulkan(item.first);
    if (release_fence.is_null())
      LOG(ERROR) << "Unable to create a release fence for Vulkan.";
    PostTaskToClientThread(
        base::BindOnce(std::move(item.second), std::move(release_fence)));
    pending_release_fence_cbs_.pop_front();
  }
#else
  DCHECK(pending_release_fence_cbs_.empty());
#endif

  if (frame) {
    if (waiting_for_full_damage_) {
      // If we're using partial swap, we need to check whether the sub-buffer
      // rect is actually the entire screen, but otherwise, the damage is
      // always the full surface.
      if (frame->sub_buffer_rect && capabilities().supports_post_sub_buffer &&
          frame->sub_buffer_rect->size() != size_) {
        output_device_->SwapBuffersSkipped(buffer_presented_callback_,
                                           std::move(*frame));
        output_surface_plane_.reset();
        destroy_after_swap_.clear();
        return;
      }
      waiting_for_full_damage_ = false;
    }

    if (output_surface_plane_)
      DCHECK(output_device_->IsPrimaryPlaneOverlay());

    if (frame->sub_buffer_rect) {
      if (capabilities().supports_post_sub_buffer) {
        if (capabilities().output_surface_origin ==
            gfx::SurfaceOrigin::kBottomLeft) {
          frame->sub_buffer_rect->set_y(size_.height() -
                                        frame->sub_buffer_rect->y() -
                                        frame->sub_buffer_rect->height());
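          // For example, with size_.height() == 1080 and a sub-buffer rect at
          // y == 0 with height == 100, the flipped y becomes 980.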
        }
      }
      if (output_surface_plane_)
        output_surface_plane_->damage_rect = frame->sub_buffer_rect;
    }

    if (overlays_.size()) {
      TRACE_EVENT1("viz", "SkiaOutputDevice->ScheduleOverlays()",
                   "num_overlays", overlays_.size());
      constexpr base::TimeDelta kHistogramMinTime = base::Microseconds(5);
      constexpr base::TimeDelta kHistogramMaxTime = base::Milliseconds(16);
      constexpr int kHistogramTimeBuckets = 50;
      base::TimeTicks start_time = base::TimeTicks::Now();
      output_device_->ScheduleOverlays(std::move(overlays_));
      UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(
          "Gpu.OutputSurface.ScheduleOverlaysUs",
          base::TimeTicks::Now() - start_time, kHistogramMinTime,
          kHistogramMaxTime, kHistogramTimeBuckets);
    }

    output_device_->SetViewportSize(frame->size);
    output_device_->SchedulePrimaryPlane(output_surface_plane_);

    if (frame->sub_buffer_rect) {
      if (capabilities().supports_post_sub_buffer) {
        output_device_->PostSubBuffer(*frame->sub_buffer_rect,
                                      buffer_presented_callback_,
                                      std::move(*frame));
      } else if (capabilities().supports_commit_overlay_planes) {
        // CommitOverlayPlanes() can only be used for an empty swap.
        DCHECK(frame->sub_buffer_rect->IsEmpty());
        output_device_->CommitOverlayPlanes(buffer_presented_callback_,
                                            std::move(*frame));
      } else {
        NOTREACHED();
      }
    } else {
      output_device_->SwapBuffers(buffer_presented_callback_,
                                  std::move(*frame));
    }
  }

  // Reset the overlay plane information even on a skipped swap.
  output_surface_plane_.reset();
  overlays_.clear();
  destroy_after_swap_.clear();

  context_state_->UpdateSkiaOwnedMemorySize();
}

bool SkiaOutputSurfaceImplOnGpu::IsDisplayedAsOverlay() {
  return output_device_->IsPrimaryPlaneOverlay();
}

#if BUILDFLAG(IS_WIN)
void SkiaOutputSurfaceImplOnGpu::DidCreateAcceleratedSurfaceChildWindow(
    gpu::SurfaceHandle parent_window,
    gpu::SurfaceHandle child_window) {
  dependency_->DidCreateAcceleratedSurfaceChildWindow(parent_window,
                                                      child_window);
}
#endif

const gpu::gles2::FeatureInfo* SkiaOutputSurfaceImplOnGpu::GetFeatureInfo()
    const {
  return feature_info_.get();
}

const gpu::GpuPreferences& SkiaOutputSurfaceImplOnGpu::GetGpuPreferences()
    const {
  return gpu_preferences_;
}

GpuVSyncCallback SkiaOutputSurfaceImplOnGpu::GetGpuVSyncCallback() {
  return gpu_vsync_callback_;
}

base::TimeDelta SkiaOutputSurfaceImplOnGpu::GetGpuBlockedTimeSinceLastSwap() {
  return dependency_->GetGpuBlockedTimeSinceLastSwap();
}

void SkiaOutputSurfaceImplOnGpu::DidSwapBuffersCompleteInternal(
    gpu::SwapBuffersCompleteParams params,
    const gfx::Size& pixel_size,
    gfx::GpuFenceHandle release_fence) {
  if (params.swap_response.result == gfx::SwapResult::SWAP_FAILED) {
    DLOG(ERROR) << "Context lost on SWAP_FAILED";
    if (!context_state_->IsCurrent(nullptr) ||
        !context_state_->CheckResetStatus(false)) {
      // Mark the context lost if not already lost.
      MarkContextLost(ContextLostReason::CONTEXT_LOST_SWAP_FAILED);
    }
  } else if (params.swap_response.result ==
             gfx::SwapResult::SWAP_NAK_RECREATE_BUFFERS) {
    // We shouldn't present newly reallocated buffers until we have fully
    // initialized their contents. SWAP_NAK_RECREATE_BUFFERS should trigger a
    // full-screen damage in DirectRenderer, but there is no guarantee that it
    // will happen immediately since the SwapBuffersComplete task gets posted
    // back to the Viz thread and will race with the next invocation of
    // DrawFrame. To ensure we do not display uninitialized memory, we hold
    // off on submitting new frames until we have received a full damage.
    waiting_for_full_damage_ = true;
  }

  PostTaskToClientThread(base::BindOnce(did_swap_buffer_complete_callback_,
                                        params, pixel_size,
                                        std::move(release_fence)));
}

SkiaOutputSurfaceImplOnGpu::DidSwapBufferCompleteCallback
SkiaOutputSurfaceImplOnGpu::GetDidSwapBuffersCompleteCallback() {
  return base::BindRepeating(
      &SkiaOutputSurfaceImplOnGpu::DidSwapBuffersCompleteInternal, weak_ptr_);
}

void SkiaOutputSurfaceImplOnGpu::OnContextLost() {
  MarkContextLost(ContextLostReason::CONTEXT_LOST_UNKNOWN);
}

void SkiaOutputSurfaceImplOnGpu::MarkContextLost(ContextLostReason reason) {
  // This function can potentially be re-entered from
  // SharedContextState::MarkContextLost(). This guards against it.
  if (context_is_lost_)
    return;
  context_is_lost_ = true;

  UMA_HISTOGRAM_ENUMERATION("GPU.ContextLost.DisplayCompositor", reason);

  // Release all ongoing AsyncReadResults.
  ReleaseAsyncReadResultHelpers();

  context_state_->MarkContextLost();
  if (context_lost_callback_) {
    PostTaskToClientThread(std::move(context_lost_callback_));
  }
}

void SkiaOutputSurfaceImplOnGpu::ScheduleCheckReadbackCompletion() {
  if (num_readbacks_pending_ > 0 && !readback_poll_pending_) {
    dependency_->ScheduleDelayedGPUTaskFromGPUThread(
        base::BindOnce(&SkiaOutputSurfaceImplOnGpu::CheckReadbackCompletion,
                       weak_ptr_factory_.GetWeakPtr()));
    readback_poll_pending_ = true;
  }
}

void SkiaOutputSurfaceImplOnGpu::CheckReadbackCompletion() {
  readback_poll_pending_ = false;

  // If there are no pending readback requests, or we can't make the context
  // current, then exit - there is nothing to do here.
  if (num_readbacks_pending_ == 0 || !MakeCurrent(/*need_framebuffer=*/false))
    return;

  gr_context()->checkAsyncWorkCompletion();
  ScheduleCheckReadbackCompletion();
}

void SkiaOutputSurfaceImplOnGpu::PreserveChildSurfaceControls() {
  if (gl_surface_)
    gl_surface_->PreserveChildSurfaceControls();
}

void SkiaOutputSurfaceImplOnGpu::InitDelegatedInkPointRendererReceiver(
    mojo::PendingReceiver<gfx::mojom::DelegatedInkPointRenderer>
        pending_receiver) {
  if (gl_surface_) {
    gl_surface_->InitDelegatedInkPointRendererReceiver(
        std::move(pending_receiver));
  }
}

const scoped_refptr<AsyncReadResultLock>
SkiaOutputSurfaceImplOnGpu::GetAsyncReadResultLock() const {
  return async_read_result_lock_;
}

void SkiaOutputSurfaceImplOnGpu::AddAsyncReadResultHelperWithLock(
    AsyncReadResultHelper* helper) {
  async_read_result_lock_->lock().AssertAcquired();
  DCHECK(helper);
  async_read_result_helpers_.insert(helper);
}

void SkiaOutputSurfaceImplOnGpu::RemoveAsyncReadResultHelperWithLock(
    AsyncReadResultHelper* helper) {
  async_read_result_lock_->lock().AssertAcquired();
  DCHECK(helper);
  DCHECK(async_read_result_helpers_.count(helper));
  async_read_result_helpers_.erase(helper);
}

void SkiaOutputSurfaceImplOnGpu::EnsureBackbuffer() {
  // We call GLSurface::SetBackbufferAllocation() in Ensure/DiscardBackbuffer,
  // so we technically need the framebuffer. In reality no GLSurface implements
  // it, but until it's removed we should keep passing true here.
  MakeCurrent(/*need_framebuffer=*/true);
  output_device_->EnsureBackbuffer();
}

void SkiaOutputSurfaceImplOnGpu::DiscardBackbuffer() {
  // We call GLSurface::SetBackbufferAllocation() in Ensure/DiscardBackbuffer,
  // so we technically need the framebuffer. In reality no GLSurface implements
  // it, but until it's removed we should keep passing true here.
  MakeCurrent(/*need_framebuffer=*/true);
  output_device_->DiscardBackbuffer();
}

#if BUILDFLAG(ENABLE_VULKAN)
gfx::GpuFenceHandle SkiaOutputSurfaceImplOnGpu::CreateReleaseFenceForVulkan(
    const GrBackendSemaphore& semaphore) {
  DCHECK(is_using_vulkan());
  if (semaphore.vkSemaphore() == VK_NULL_HANDLE)
    return {};

  auto* implementation = vulkan_context_provider_->GetVulkanImplementation();
  VkDevice device =
      vulkan_context_provider_->GetDeviceQueue()->GetVulkanDevice();

  auto handle =
      implementation->GetSemaphoreHandle(device, semaphore.vkSemaphore());
  if (!handle.is_valid()) {
    vkDestroySemaphore(device, semaphore.vkSemaphore(),
                       /*pAllocator=*/nullptr);
    LOG(ERROR) << "Failed to create a release fence for Vulkan.";
    return {};
  }
  return std::move(handle).ToGpuFenceHandle();
}

bool SkiaOutputSurfaceImplOnGpu::CreateAndStoreExternalSemaphoreVulkan(
    std::vector<GrBackendSemaphore>& end_semaphores) {
  DCHECK(is_using_vulkan());
  auto* implementation = vulkan_context_provider_->GetVulkanImplementation();
  VkDevice device =
      vulkan_context_provider_->GetDeviceQueue()->GetVulkanDevice();

  VkSemaphore semaphore = implementation->CreateExternalSemaphore(device);
  if (semaphore == VK_NULL_HANDLE) {
    LOG(ERROR)
        << "Creation of an external semaphore for a release fence failed.";
    return false;
  }

  end_semaphores.emplace_back();
  end_semaphores.back().initVulkan(semaphore);
  return true;
}
#endif

gfx::GpuFenceHandle SkiaOutputSurfaceImplOnGpu::CreateReleaseFenceForGL() {
  if (gl::GLFence::IsGpuFenceSupported()) {
    auto fence = gl::GLFence::CreateForGpuFence();
    if (fence)
      return fence->GetGpuFence()->GetGpuFenceHandle().Clone();
  }
  return {};
}

}  // namespace viz