diff --git a/impeller/renderer/backend/gles/blit_command_gles.cc b/impeller/renderer/backend/gles/blit_command_gles.cc index c5ea94797b484..2269b8a96fbab 100644 --- a/impeller/renderer/backend/gles/blit_command_gles.cc +++ b/impeller/renderer/backend/gles/blit_command_gles.cc @@ -9,6 +9,7 @@ #include "impeller/base/validation.h" #include "impeller/geometry/point.h" #include "impeller/renderer/backend/gles/device_buffer_gles.h" +#include "impeller/renderer/backend/gles/reactor_gles.h" #include "impeller/renderer/backend/gles/texture_gles.h" namespace impeller { @@ -73,7 +74,7 @@ bool BlitCopyTextureToTextureCommandGLES::Encode( // emulate the blit when it's not available in the driver. if (!gl.BlitFramebuffer.IsAvailable()) { // TODO(135818): Emulate the blit using a raster draw call here. - FML_LOG(ERROR) << "Texture blit fallback not implemented yet for GLES2."; + VALIDATION_LOG << "Texture blit fallback not implemented yet for GLES2."; return false; } @@ -353,4 +354,69 @@ bool BlitGenerateMipmapCommandGLES::Encode(const ReactorGLES& reactor) const { return true; }; +////// BlitResizeTextureCommandGLES +////////////////////////////////////////////////////// + +BlitResizeTextureCommandGLES::~BlitResizeTextureCommandGLES() = default; + +std::string BlitResizeTextureCommandGLES::GetLabel() const { + return "Resize Texture"; +} + +bool BlitResizeTextureCommandGLES::Encode(const ReactorGLES& reactor) const { + const auto& gl = reactor.GetProcTable(); + + // glBlitFramebuffer is a GLES3 proc. Since we target GLES2, we need to + // emulate the blit when it's not available in the driver. + if (!gl.BlitFramebuffer.IsAvailable()) { + // TODO(135818): Emulate the blit using a raster draw call here. + VALIDATION_LOG << "Texture blit fallback not implemented yet for GLES2."; + return false; + } + + GLuint read_fbo = GL_NONE; + GLuint draw_fbo = GL_NONE; + fml::ScopedCleanupClosure delete_fbos([&gl, &read_fbo, &draw_fbo]() { + DeleteFBO(gl, read_fbo, GL_READ_FRAMEBUFFER); + DeleteFBO(gl, draw_fbo, GL_DRAW_FRAMEBUFFER); + }); + + { + auto read = ConfigureFBO(gl, source, GL_READ_FRAMEBUFFER); + if (!read.has_value()) { + return false; + } + read_fbo = read.value(); + } + + { + auto draw = ConfigureFBO(gl, destination, GL_DRAW_FRAMEBUFFER); + if (!draw.has_value()) { + return false; + } + draw_fbo = draw.value(); + } + + gl.Disable(GL_SCISSOR_TEST); + gl.Disable(GL_DEPTH_TEST); + gl.Disable(GL_STENCIL_TEST); + + const IRect source_region = IRect::MakeSize(source->GetSize()); + const IRect destination_region = IRect::MakeSize(destination->GetSize()); + + gl.BlitFramebuffer(source_region.GetX(), // srcX0 + source_region.GetY(), // srcY0 + source_region.GetWidth(), // srcX1 + source_region.GetHeight(), // srcY1 + destination_region.GetX(), // dstX0 + destination_region.GetY(), // dstY0 + destination_region.GetWidth(), // dstX1 + destination_region.GetHeight(), // dstY1 + GL_COLOR_BUFFER_BIT, // mask + GL_LINEAR // filter + ); + + return true; +} + } // namespace impeller diff --git a/impeller/renderer/backend/gles/blit_command_gles.h b/impeller/renderer/backend/gles/blit_command_gles.h index f4cd901a3e848..1a3f35f1d2e82 100644 --- a/impeller/renderer/backend/gles/blit_command_gles.h +++ b/impeller/renderer/backend/gles/blit_command_gles.h @@ -59,6 +59,15 @@ struct BlitGenerateMipmapCommandGLES : public BlitEncodeGLES, [[nodiscard]] bool Encode(const ReactorGLES& reactor) const override; }; +struct BlitResizeTextureCommandGLES : public BlitEncodeGLES, + public BlitResizeTextureCommand { + 
~BlitResizeTextureCommandGLES() override; + + std::string GetLabel() const override; + + [[nodiscard]] bool Encode(const ReactorGLES& reactor) const override; +}; + } // namespace impeller #endif // FLUTTER_IMPELLER_RENDERER_BACKEND_GLES_BLIT_COMMAND_GLES_H_ diff --git a/impeller/renderer/backend/gles/blit_pass_gles.cc b/impeller/renderer/backend/gles/blit_pass_gles.cc index d83ef326856f8..28669a9f217cb 100644 --- a/impeller/renderer/backend/gles/blit_pass_gles.cc +++ b/impeller/renderer/backend/gles/blit_pass_gles.cc @@ -157,4 +157,15 @@ bool BlitPassGLES::OnGenerateMipmapCommand(std::shared_ptr texture, return true; } +// |BlitPass| +bool BlitPassGLES::ResizeTexture(const std::shared_ptr& source, + const std::shared_ptr& destination) { + auto command = std::make_unique(); + command->source = source; + command->destination = destination; + + commands_.push_back(std::move(command)); + return true; +} + } // namespace impeller diff --git a/impeller/renderer/backend/gles/blit_pass_gles.h b/impeller/renderer/backend/gles/blit_pass_gles.h index e34bb1614abb0..de688da2f63d8 100644 --- a/impeller/renderer/backend/gles/blit_pass_gles.h +++ b/impeller/renderer/backend/gles/blit_pass_gles.h @@ -40,6 +40,10 @@ class BlitPassGLES final : public BlitPass, bool EncodeCommands( const std::shared_ptr& transients_allocator) const override; + // |BlitPass| + bool ResizeTexture(const std::shared_ptr& source, + const std::shared_ptr& destination) override; + // |BlitPass| bool OnCopyTextureToTextureCommand(std::shared_ptr source, std::shared_ptr destination, diff --git a/impeller/renderer/backend/metal/BUILD.gn b/impeller/renderer/backend/metal/BUILD.gn index 343330fc2ad23..9f7156da6cdc2 100644 --- a/impeller/renderer/backend/metal/BUILD.gn +++ b/impeller/renderer/backend/metal/BUILD.gn @@ -59,7 +59,10 @@ impeller_component("metal") { "//flutter/fml", ] - frameworks = [ "Metal.framework" ] + frameworks = [ + "Metal.framework", + "MetalPerformanceShaders.framework", + ] } impeller_component("metal_unittests") { diff --git a/impeller/renderer/backend/metal/blit_pass_mtl.h b/impeller/renderer/backend/metal/blit_pass_mtl.h index 1de0f45aedc3d..2fcb1832de360 100644 --- a/impeller/renderer/backend/metal/blit_pass_mtl.h +++ b/impeller/renderer/backend/metal/blit_pass_mtl.h @@ -21,6 +21,7 @@ class BlitPassMTL final : public BlitPass { id encoder_ = nil; id buffer_ = nil; + id device_ = nil; bool is_valid_ = false; bool is_metal_trace_active_ = false; // Many parts of the codebase will start writing to a render pass but @@ -28,7 +29,7 @@ class BlitPassMTL final : public BlitPass { // so that in the dtor we can always ensure the render pass is finished. 
mutable bool did_finish_encoding_ = false; - explicit BlitPassMTL(id buffer); + explicit BlitPassMTL(id buffer, id device); // |BlitPass| bool IsValid() const override; @@ -40,6 +41,10 @@ class BlitPassMTL final : public BlitPass { bool EncodeCommands( const std::shared_ptr& transients_allocator) const override; + // |BlitPass| + bool ResizeTexture(const std::shared_ptr& source, + const std::shared_ptr& destination) override; + // |BlitPass| bool OnCopyTextureToTextureCommand(std::shared_ptr source, std::shared_ptr destination, diff --git a/impeller/renderer/backend/metal/blit_pass_mtl.mm b/impeller/renderer/backend/metal/blit_pass_mtl.mm index 329fe5197caf8..8f36eff26326d 100644 --- a/impeller/renderer/backend/metal/blit_pass_mtl.mm +++ b/impeller/renderer/backend/metal/blit_pass_mtl.mm @@ -4,6 +4,7 @@ #include "impeller/renderer/backend/metal/blit_pass_mtl.h" #include +#import #include #include #include @@ -25,7 +26,8 @@ namespace impeller { -BlitPassMTL::BlitPassMTL(id buffer) : buffer_(buffer) { +BlitPassMTL::BlitPassMTL(id buffer, id device) + : buffer_(buffer), device_(device) { if (!buffer_) { return; } @@ -105,7 +107,28 @@ [encoder_ popDebugGroup]; } #endif // IMPELLER_DEBUG + return true; +} + +// |BlitPass| +bool BlitPassMTL::ResizeTexture(const std::shared_ptr& source, + const std::shared_ptr& destination) { + auto source_mtl = TextureMTL::Cast(*source).GetMTLTexture(); + if (!source_mtl) { + return false; + } + auto destination_mtl = TextureMTL::Cast(*destination).GetMTLTexture(); + if (!destination_mtl) { + return false; + } + + [encoder_ endEncoding]; + auto filter = [[MPSImageBilinearScale alloc] initWithDevice:device_]; + [filter encodeToCommandBuffer:buffer_ + sourceTexture:source_mtl + destinationTexture:destination_mtl]; + encoder_ = [buffer_ blitCommandEncoder]; return true; } diff --git a/impeller/renderer/backend/metal/command_buffer_mtl.h b/impeller/renderer/backend/metal/command_buffer_mtl.h index 58a503654796e..dad6379bdb519 100644 --- a/impeller/renderer/backend/metal/command_buffer_mtl.h +++ b/impeller/renderer/backend/metal/command_buffer_mtl.h @@ -20,9 +20,11 @@ class CommandBufferMTL final : public CommandBuffer { private: friend class ContextMTL; - id buffer_ = nullptr; + id buffer_ = nil; + id device_ = nil; CommandBufferMTL(const std::weak_ptr& context, + id device, id queue); // |CommandBuffer| diff --git a/impeller/renderer/backend/metal/command_buffer_mtl.mm b/impeller/renderer/backend/metal/command_buffer_mtl.mm index 5909426cbb544..21e9873d9c138 100644 --- a/impeller/renderer/backend/metal/command_buffer_mtl.mm +++ b/impeller/renderer/backend/metal/command_buffer_mtl.mm @@ -128,8 +128,11 @@ static bool LogMTLCommandBufferErrorIfPresent(id buffer) { } CommandBufferMTL::CommandBufferMTL(const std::weak_ptr& context, + id device, id queue) - : CommandBuffer(context), buffer_(CreateCommandBuffer(queue)) {} + : CommandBuffer(context), + buffer_(CreateCommandBuffer(queue)), + device_(device) {} CommandBufferMTL::~CommandBufferMTL() = default; @@ -208,7 +211,7 @@ static bool LogMTLCommandBufferErrorIfPresent(id buffer) { return nullptr; } - auto pass = std::shared_ptr(new BlitPassMTL(buffer_)); + auto pass = std::shared_ptr(new BlitPassMTL(buffer_, device_)); if (!pass->IsValid()) { return nullptr; } diff --git a/impeller/renderer/backend/metal/context_mtl.h b/impeller/renderer/backend/metal/context_mtl.h index b09933c752dfc..dbc3806939fe6 100644 --- a/impeller/renderer/backend/metal/context_mtl.h +++ b/impeller/renderer/backend/metal/context_mtl.h @@ -12,6 
+12,7 @@ #include "flutter/fml/concurrent_message_loop.h" #include "flutter/fml/synchronization/sync_switch.h" +#include "fml/closure.h" #include "impeller/base/backend_cast.h" #include "impeller/core/sampler.h" #include "impeller/renderer/backend/metal/allocator_mtl.h" @@ -136,7 +137,8 @@ class ContextMTL final : public Context, #endif // IMPELLER_DEBUG // |Context| - void StoreTaskForGPU(const std::function& task) override; + void StoreTaskForGPU(const fml::closure& task, + const fml::closure& failure) override; private: class SyncSwitchObserver : public fml::SyncSwitch::Observer { @@ -149,6 +151,11 @@ class ContextMTL final : public Context, ContextMTL& parent_; }; + struct PendingTasks { + fml::closure task; + fml::closure failure; + }; + id device_ = nullptr; id command_queue_ = nullptr; std::shared_ptr shader_library_; @@ -157,7 +164,7 @@ class ContextMTL final : public Context, std::shared_ptr resource_allocator_; std::shared_ptr device_capabilities_; std::shared_ptr is_gpu_disabled_sync_switch_; - std::deque> tasks_awaiting_gpu_; + std::deque tasks_awaiting_gpu_; std::unique_ptr sync_switch_observer_; std::shared_ptr command_queue_ip_; #ifdef IMPELLER_DEBUG diff --git a/impeller/renderer/backend/metal/context_mtl.mm b/impeller/renderer/backend/metal/context_mtl.mm index 03c6ea073950f..fdd3202edd67f 100644 --- a/impeller/renderer/backend/metal/context_mtl.mm +++ b/impeller/renderer/backend/metal/context_mtl.mm @@ -338,7 +338,7 @@ new ContextMTL(device, command_queue, } auto buffer = std::shared_ptr( - new CommandBufferMTL(weak_from_this(), queue)); + new CommandBufferMTL(weak_from_this(), device_, queue)); if (!buffer->IsValid()) { return nullptr; } @@ -377,17 +377,21 @@ new ContextMTL(device, command_queue, return buffer; } -void ContextMTL::StoreTaskForGPU(const std::function& task) { - tasks_awaiting_gpu_.emplace_back(task); +void ContextMTL::StoreTaskForGPU(const fml::closure& task, + const fml::closure& failure) { + tasks_awaiting_gpu_.push_back(PendingTasks{task, failure}); while (tasks_awaiting_gpu_.size() > kMaxTasksAwaitingGPU) { - tasks_awaiting_gpu_.front()(); + PendingTasks front = std::move(tasks_awaiting_gpu_.front()); + if (front.failure) { + front.failure(); + } tasks_awaiting_gpu_.pop_front(); } } void ContextMTL::FlushTasksAwaitingGPU() { for (const auto& task : tasks_awaiting_gpu_) { - task(); + task.task(); } tasks_awaiting_gpu_.clear(); } diff --git a/impeller/renderer/backend/vulkan/blit_pass_vk.cc b/impeller/renderer/backend/vulkan/blit_pass_vk.cc index b21403955edc2..a9fce56228c2d 100644 --- a/impeller/renderer/backend/vulkan/blit_pass_vk.cc +++ b/impeller/renderer/backend/vulkan/blit_pass_vk.cc @@ -319,6 +319,90 @@ bool BlitPassVK::OnCopyBufferToTextureCommand( return true; } +// |BlitPass| +bool BlitPassVK::ResizeTexture(const std::shared_ptr& source, + const std::shared_ptr& destination) { + auto& encoder = *command_buffer_->GetEncoder(); + const auto& cmd_buffer = encoder.GetCommandBuffer(); + + const auto& src = TextureVK::Cast(*source); + const auto& dst = TextureVK::Cast(*destination); + + if (!encoder.Track(source) || !encoder.Track(destination)) { + return false; + } + + BarrierVK src_barrier; + src_barrier.cmd_buffer = cmd_buffer; + src_barrier.new_layout = vk::ImageLayout::eTransferSrcOptimal; + src_barrier.src_access = vk::AccessFlagBits::eTransferWrite | + vk::AccessFlagBits::eShaderWrite | + vk::AccessFlagBits::eColorAttachmentWrite; + src_barrier.src_stage = vk::PipelineStageFlagBits::eTransfer | + vk::PipelineStageFlagBits::eFragmentShader | + 
vk::PipelineStageFlagBits::eColorAttachmentOutput; + src_barrier.dst_access = vk::AccessFlagBits::eTransferRead; + src_barrier.dst_stage = vk::PipelineStageFlagBits::eTransfer; + + BarrierVK dst_barrier; + dst_barrier.cmd_buffer = cmd_buffer; + dst_barrier.new_layout = vk::ImageLayout::eTransferDstOptimal; + dst_barrier.src_access = {}; + dst_barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe; + dst_barrier.dst_access = + vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eTransferWrite; + dst_barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader | + vk::PipelineStageFlagBits::eTransfer; + + if (!src.SetLayout(src_barrier) || !dst.SetLayout(dst_barrier)) { + VALIDATION_LOG << "Could not complete layout transitions."; + return false; + } + + vk::ImageBlit blit; + blit.srcSubresource.aspectMask = vk::ImageAspectFlagBits::eColor; + blit.srcSubresource.baseArrayLayer = 0u; + blit.srcSubresource.layerCount = 1u; + blit.srcSubresource.mipLevel = 0; + + blit.dstSubresource.aspectMask = vk::ImageAspectFlagBits::eColor; + blit.dstSubresource.baseArrayLayer = 0u; + blit.dstSubresource.layerCount = 1u; + blit.dstSubresource.mipLevel = 0; + + // offsets[0] is origin. + blit.srcOffsets[1].x = std::max(source->GetSize().width, 1u); + blit.srcOffsets[1].y = std::max(source->GetSize().height, 1u); + blit.srcOffsets[1].z = 1u; + + // offsets[0] is origin. + blit.dstOffsets[1].x = std::max(destination->GetSize().width, 1u); + blit.dstOffsets[1].y = std::max(destination->GetSize().height, 1u); + blit.dstOffsets[1].z = 1u; + + cmd_buffer.blitImage(src.GetImage(), // + src_barrier.new_layout, // + dst.GetImage(), // + dst_barrier.new_layout, // + 1, // + &blit, // + vk::Filter::eLinear + + ); + + // Convert back to shader read + + BarrierVK barrier; + barrier.cmd_buffer = cmd_buffer; + barrier.new_layout = vk::ImageLayout::eShaderReadOnlyOptimal; + barrier.src_access = {}; + barrier.src_stage = vk::PipelineStageFlagBits::eTopOfPipe; + barrier.dst_access = vk::AccessFlagBits::eShaderRead; + barrier.dst_stage = vk::PipelineStageFlagBits::eFragmentShader; + + return dst.SetLayout(barrier); +} + // |BlitPass| bool BlitPassVK::OnGenerateMipmapCommand(std::shared_ptr texture, std::string label) { diff --git a/impeller/renderer/backend/vulkan/blit_pass_vk.h b/impeller/renderer/backend/vulkan/blit_pass_vk.h index 7d9c4c7368338..fd5ba51dd691c 100644 --- a/impeller/renderer/backend/vulkan/blit_pass_vk.h +++ b/impeller/renderer/backend/vulkan/blit_pass_vk.h @@ -37,6 +37,10 @@ class BlitPassVK final : public BlitPass { bool EncodeCommands( const std::shared_ptr& transients_allocator) const override; + // |BlitPass| + bool ResizeTexture(const std::shared_ptr& source, + const std::shared_ptr& destination) override; + // |BlitPass| bool ConvertTextureToShaderRead( const std::shared_ptr& texture) override; diff --git a/impeller/renderer/blit_command.h b/impeller/renderer/blit_command.h index 0bae872ab0796..8ae331b4bd0a5 100644 --- a/impeller/renderer/blit_command.h +++ b/impeller/renderer/blit_command.h @@ -22,6 +22,11 @@ struct BlitCopyTextureToTextureCommand : public BlitCommand { IPoint destination_origin; }; +struct BlitResizeTextureCommand : public BlitCommand { + std::shared_ptr source; + std::shared_ptr destination; +}; + struct BlitCopyTextureToBufferCommand : public BlitCommand { std::shared_ptr source; std::shared_ptr destination; diff --git a/impeller/renderer/blit_pass.h b/impeller/renderer/blit_pass.h index 6f5f332b3aa6f..f48c047691c2b 100644 --- a/impeller/renderer/blit_pass.h +++ 
b/impeller/renderer/blit_pass.h @@ -39,6 +39,14 @@ class BlitPass { virtual bool ConvertTextureToShaderRead( const std::shared_ptr& texture); + //---------------------------------------------------------------------------- + /// @brief Resize the [source] texture into the [destination] texture. + /// + /// On Metal platforms, [destination] is required to be non-lossy + /// and have the Shader read capability. + virtual bool ResizeTexture(const std::shared_ptr& source, + const std::shared_ptr& destination) = 0; + //---------------------------------------------------------------------------- /// @brief Record a command to copy the contents of one texture to /// another texture. The blit area is limited by the intersection diff --git a/impeller/renderer/blit_pass_unittests.cc b/impeller/renderer/blit_pass_unittests.cc index b99a3d0aff443..1f902b1844b56 100644 --- a/impeller/renderer/blit_pass_unittests.cc +++ b/impeller/renderer/blit_pass_unittests.cc @@ -2,8 +2,11 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +#include +#include "fml/mapping.h" #include "gtest/gtest.h" #include "impeller/base/validation.h" +#include "impeller/core/device_buffer.h" #include "impeller/core/device_buffer_descriptor.h" #include "impeller/core/formats.h" #include "impeller/core/texture_descriptor.h" @@ -130,5 +133,44 @@ TEST_P(BlitPassTest, CanBlitSmallRegionToUninitializedTexture) { EXPECT_TRUE(context->GetCommandQueue()->Submit({std::move(cmd_buffer)}).ok()); } +TEST_P(BlitPassTest, CanResizeTextures) { + auto context = GetContext(); + auto cmd_buffer = context->CreateCommandBuffer(); + auto blit_pass = cmd_buffer->CreateBlitPass(); + + TextureDescriptor dst_format; + dst_format.storage_mode = StorageMode::kDevicePrivate; + dst_format.format = PixelFormat::kR8G8B8A8UNormInt; + dst_format.size = {10, 10}; + dst_format.usage = TextureUsage::kShaderRead | TextureUsage::kShaderWrite; + auto dst = context->GetResourceAllocator()->CreateTexture(dst_format); + + TextureDescriptor src_format; + src_format.storage_mode = StorageMode::kDevicePrivate; + src_format.format = PixelFormat::kR8G8B8A8UNormInt; + src_format.size = {100, 100}; + auto src = context->GetResourceAllocator()->CreateTexture(src_format); + + std::vector bytes(src_format.GetByteSizeOfBaseMipLevel()); + for (auto i = 0u; i < src_format.GetByteSizeOfBaseMipLevel(); i += 4) { + // RGBA + bytes[i + 0] = 255; + bytes[i + 1] = 0; + bytes[i + 2] = 0; + bytes[i + 3] = 255; + } + auto mapping = fml::DataMapping(bytes); + auto staging = context->GetResourceAllocator()->CreateBufferWithCopy(mapping); + + ASSERT_TRUE(dst); + ASSERT_TRUE(src); + ASSERT_TRUE(staging); + + EXPECT_TRUE(blit_pass->AddCopy(DeviceBuffer::AsBufferView(staging), src)); + EXPECT_TRUE(blit_pass->ResizeTexture(src, dst)); + EXPECT_TRUE(blit_pass->EncodeCommands(GetContext()->GetResourceAllocator())); + EXPECT_TRUE(context->GetCommandQueue()->Submit({std::move(cmd_buffer)}).ok()); +} + } // namespace testing } // namespace impeller diff --git a/impeller/renderer/context.h b/impeller/renderer/context.h index 4a51560978bb2..7c05ce92264c3 100644 --- a/impeller/renderer/context.h +++ b/impeller/renderer/context.h @@ -8,6 +8,7 @@ #include #include +#include "fml/closure.h" #include "impeller/core/allocator.h" #include "impeller/core/formats.h" #include "impeller/renderer/capabilities.h" @@ -176,10 +177,14 @@ class Context { /// being available or that the task has been canceled. 
The task should /// operate with the `SyncSwitch` to make sure the GPU is accessible. /// + /// If the queue of pending tasks is cleared without GPU access, then the + /// failure callback will be invoked and the primary task function will not + /// be invoked. /// Threadsafe. /// /// `task` will be executed on the platform thread. - virtual void StoreTaskForGPU(const std::function& task) { + virtual void StoreTaskForGPU(const fml::closure& task, + const fml::closure& failure) { FML_CHECK(false && "not supported in this context"); } diff --git a/impeller/renderer/testing/mocks.h b/impeller/renderer/testing/mocks.h index d4e27449bfe65..b1b1ff6964233 100644 --- a/impeller/renderer/testing/mocks.h +++ b/impeller/renderer/testing/mocks.h @@ -61,6 +61,12 @@ class MockBlitPass : public BlitPass { (const, override)); MOCK_METHOD(void, OnSetLabel, (std::string label), (override)); + MOCK_METHOD(bool, + ResizeTexture, + (const std::shared_ptr& source, + const std::shared_ptr& destination), + (override)); + MOCK_METHOD(bool, OnCopyTextureToTextureCommand, (std::shared_ptr source, diff --git a/lib/ui/painting/image_decoder_impeller.cc b/lib/ui/painting/image_decoder_impeller.cc index 76a5b25202f14..565e034545a74 100644 --- a/lib/ui/painting/image_decoder_impeller.cc +++ b/lib/ui/painting/image_decoder_impeller.cc @@ -15,6 +15,8 @@ #include "flutter/impeller/renderer/context.h" #include "impeller/base/strings.h" #include "impeller/core/device_buffer.h" +#include "impeller/core/formats.h" +#include "impeller/core/texture_descriptor.h" #include "impeller/display_list/skia_conversions.h" #include "impeller/geometry/size.h" #include "third_party/skia/include/core/SkAlphaType.h" @@ -26,7 +28,6 @@ #include "third_party/skia/include/core/SkPixelRef.h" #include "third_party/skia/include/core/SkPixmap.h" #include "third_party/skia/include/core/SkPoint.h" -#include "third_party/skia/include/core/SkSamplingOptions.h" #include "third_party/skia/include/core/SkSize.h" namespace flutter { @@ -224,59 +225,68 @@ DecompressResult ImageDecoderImpeller::DecompressTexture( bitmap = premul_bitmap; } - if (bitmap->dimensions() == target_size) { + std::shared_ptr buffer = + bitmap_allocator->GetDeviceBuffer(); + if (!buffer) { + return DecompressResult{.decode_error = "Unable to get device buffer"}; + } + buffer->Flush(); + + std::optional resize_info = + bitmap->dimensions() == target_size + ? std::nullopt + : std::optional(image_info.makeDimensions(target_size)); + + if (source_size.width() > max_texture_size.width || + source_size.height() > max_texture_size.height) { + //---------------------------------------------------------------------------- + /// 2. If the decoded image isn't the requested target size and the src size + /// exceeds the device max texture size, perform a slow CPU resize.
+ /// + TRACE_EVENT0("impeller", "SlowCPUDecodeScale"); + const auto scaled_image_info = image_info.makeDimensions(target_size); + + auto scaled_bitmap = std::make_shared(); + auto scaled_allocator = std::make_shared(allocator); + scaled_bitmap->setInfo(scaled_image_info); + if (!scaled_bitmap->tryAllocPixels(scaled_allocator.get())) { + std::string decode_error( + "Could not allocate scaled bitmap for image decompression."); + FML_DLOG(ERROR) << decode_error; + return DecompressResult{.decode_error = decode_error}; + } + if (!bitmap->pixmap().scalePixels( + scaled_bitmap->pixmap(), + SkSamplingOptions(SkFilterMode::kLinear, SkMipmapMode::kNone))) { + FML_LOG(ERROR) << "Could not scale decoded bitmap data."; + } + scaled_bitmap->setImmutable(); + std::shared_ptr buffer = - bitmap_allocator->GetDeviceBuffer(); + scaled_allocator->GetDeviceBuffer(); if (!buffer) { return DecompressResult{.decode_error = "Unable to get device buffer"}; } buffer->Flush(); return DecompressResult{.device_buffer = std::move(buffer), - .sk_bitmap = bitmap, - .image_info = bitmap->info()}; + .sk_bitmap = scaled_bitmap, + .image_info = scaled_bitmap->info()}; } - //---------------------------------------------------------------------------- - /// 2. If the decoded image isn't the requested target size, resize it. - /// - - TRACE_EVENT0("impeller", "DecodeScale"); - const auto scaled_image_info = image_info.makeDimensions(target_size); - - auto scaled_bitmap = std::make_shared(); - auto scaled_allocator = std::make_shared(allocator); - scaled_bitmap->setInfo(scaled_image_info); - if (!scaled_bitmap->tryAllocPixels(scaled_allocator.get())) { - std::string decode_error( - "Could not allocate scaled bitmap for image decompression."); - FML_DLOG(ERROR) << decode_error; - return DecompressResult{.decode_error = decode_error}; - } - if (!bitmap->pixmap().scalePixels( - scaled_bitmap->pixmap(), - SkSamplingOptions(SkFilterMode::kLinear, SkMipmapMode::kNone))) { - FML_LOG(ERROR) << "Could not scale decoded bitmap data."; - } - scaled_bitmap->setImmutable(); - - std::shared_ptr buffer = - scaled_allocator->GetDeviceBuffer(); - buffer->Flush(); - - if (!buffer) { - return DecompressResult{.decode_error = "Unable to get device buffer"}; - } return DecompressResult{.device_buffer = std::move(buffer), - .sk_bitmap = scaled_bitmap, - .image_info = scaled_bitmap->info()}; + .sk_bitmap = bitmap, + .image_info = bitmap->info(), + .resize_info = resize_info}; } -/// Only call this method if the GPU is available. -static std::pair, std::string> UnsafeUploadTextureToPrivate( +// static +std::pair, std::string> +ImageDecoderImpeller::UnsafeUploadTextureToPrivate( const std::shared_ptr& context, const std::shared_ptr& buffer, - const SkImageInfo& image_info) { + const SkImageInfo& image_info, + const std::optional& resize_info) { const auto pixel_format = impeller::skia_conversions::ToPixelFormat(image_info.colorType()); if (!pixel_format) { @@ -292,6 +302,12 @@ static std::pair, std::string> UnsafeUploadTextureToPrivate( texture_descriptor.size = {image_info.width(), image_info.height()}; texture_descriptor.mip_count = texture_descriptor.size.MipCount(); texture_descriptor.compression_type = impeller::CompressionType::kLossy; + if (context->GetBackendType() == impeller::Context::BackendType::kMetal && + resize_info.has_value()) { + // The MPS used to resize images on iOS does not require mip generation. + // Remove mip count if we are resizing the image on the GPU. 
+ texture_descriptor.mip_count = 1; + } auto dest_texture = context->GetResourceAllocator()->CreateTexture(texture_descriptor); @@ -323,59 +339,107 @@ static std::pair, std::string> UnsafeUploadTextureToPrivate( blit_pass->SetLabel("Mipmap Blit Pass"); blit_pass->AddCopy(impeller::DeviceBuffer::AsBufferView(buffer), dest_texture); - if (texture_descriptor.size.MipCount() > 1) { + if (texture_descriptor.mip_count > 1) { blit_pass->GenerateMipmap(dest_texture); } + std::shared_ptr result_texture = dest_texture; + if (resize_info.has_value()) { + impeller::TextureDescriptor resize_desc; + resize_desc.storage_mode = impeller::StorageMode::kDevicePrivate; + resize_desc.format = pixel_format.value(); + resize_desc.size = {resize_info->width(), resize_info->height()}; + resize_desc.mip_count = resize_desc.size.MipCount(); + resize_desc.compression_type = impeller::CompressionType::kLossy; + resize_desc.usage = impeller::TextureUsage::kShaderRead; + if (context->GetBackendType() == impeller::Context::BackendType::kMetal) { + // Resizing requires an MPS on Metal platforms. + resize_desc.usage |= impeller::TextureUsage::kShaderWrite; + resize_desc.compression_type = impeller::CompressionType::kLossless; + } + + auto resize_texture = + context->GetResourceAllocator()->CreateTexture(resize_desc); + if (!resize_texture) { + std::string decode_error("Could not create resized Impeller texture."); + FML_DLOG(ERROR) << decode_error; + return std::make_pair(nullptr, decode_error); + } + + blit_pass->ResizeTexture(/*source=*/dest_texture, + /*destination=*/resize_texture); + if (resize_desc.mip_count > 1) { + blit_pass->GenerateMipmap(resize_texture); + } + + result_texture = std::move(resize_texture); + } blit_pass->EncodeCommands(context->GetResourceAllocator()); + if (!context->GetCommandQueue()->Submit({command_buffer}).ok()) { - std::string decode_error("Failed to submit blit pass command buffer."); + std::string decode_error("Failed to submit image decoding command buffer."); FML_DLOG(ERROR) << decode_error; return std::make_pair(nullptr, decode_error); } return std::make_pair( - impeller::DlImageImpeller::Make(std::move(dest_texture)), std::string()); + impeller::DlImageImpeller::Make(std::move(result_texture)), + std::string()); } -std::pair, std::string> -ImageDecoderImpeller::UploadTextureToPrivate( +void ImageDecoderImpeller::UploadTextureToPrivate( + ImageResult result, const std::shared_ptr& context, const std::shared_ptr& buffer, const SkImageInfo& image_info, const std::shared_ptr& bitmap, + const std::optional& resize_info, const std::shared_ptr& gpu_disabled_switch) { TRACE_EVENT0("impeller", __FUNCTION__); if (!context) { - return std::make_pair(nullptr, "No Impeller context is available"); + result(nullptr, "No Impeller context is available"); + return; } if (!buffer) { - return std::make_pair(nullptr, "No Impeller device buffer is available"); + result(nullptr, "No Impeller device buffer is available"); + return; } - std::pair, std::string> result; gpu_disabled_switch->Execute( fml::SyncSwitch::Handlers() - .SetIfFalse([&result, context, buffer, image_info] { - result = UnsafeUploadTextureToPrivate(context, buffer, image_info); + .SetIfFalse([&result, context, buffer, image_info, resize_info] { + sk_sp image; + std::string decode_error; + std::tie(image, decode_error) = + UnsafeUploadTextureToPrivate(context, buffer, image_info, + resize_info); + result(image, decode_error); + }) - .SetIfTrue([&result, context, bitmap, gpu_disabled_switch] { - // create_mips
is false because we already know the GPU is disabled. - result = - UploadTextureToStorage(context, bitmap, gpu_disabled_switch, - impeller::StorageMode::kHostVisible, - /*create_mips=*/false); + .SetIfTrue([&result, context, buffer, image_info, resize_info] { + // The `result` function must be copied in the capture list for each + // closure or the stack-allocated callback will be cleared by the + // time the closure is executed later. + context->StoreTaskForGPU( + [result, context, buffer, image_info, resize_info]() { + sk_sp image; + std::string decode_error; + std::tie(image, decode_error) = + UnsafeUploadTextureToPrivate(context, buffer, + image_info, resize_info); + result(image, decode_error); + }, + [result]() { + result(nullptr, + "Image upload failed due to loss of GPU access."); + }); })); - return result; } std::pair, std::string> ImageDecoderImpeller::UploadTextureToStorage( const std::shared_ptr& context, - std::shared_ptr bitmap, - const std::shared_ptr& gpu_disabled_switch, - impeller::StorageMode storage_mode, - bool create_mips) { + std::shared_ptr bitmap) { TRACE_EVENT0("impeller", __FUNCTION__); if (!context) { return std::make_pair(nullptr, "No Impeller context is available"); @@ -394,11 +458,10 @@ ImageDecoderImpeller::UploadTextureToStorage( } impeller::TextureDescriptor texture_descriptor; - texture_descriptor.storage_mode = storage_mode; + texture_descriptor.storage_mode = impeller::StorageMode::kHostVisible; texture_descriptor.format = pixel_format.value(); texture_descriptor.size = {image_info.width(), image_info.height()}; - texture_descriptor.mip_count = - create_mips ? texture_descriptor.size.MipCount() : 1; + texture_descriptor.mip_count = 1; auto texture = context->GetResourceAllocator()->CreateTexture(texture_descriptor); @@ -421,43 +484,6 @@ ImageDecoderImpeller::UploadTextureToStorage( } texture->SetLabel(impeller::SPrintF("ui.Image(%p)", texture.get()).c_str()); - - if (texture_descriptor.mip_count > 1u && create_mips) { - std::optional decode_error; - - // The only platform that needs mipmapping unconditionally is GL. - // GL based platforms never disable GPU access. - // This is only really needed for iOS.
- gpu_disabled_switch->Execute(fml::SyncSwitch::Handlers().SetIfFalse( - [context, &texture, &decode_error] { - auto command_buffer = context->CreateCommandBuffer(); - if (!command_buffer) { - decode_error = - "Could not create command buffer for mipmap generation."; - return; - } - command_buffer->SetLabel("Mipmap Command Buffer"); - - auto blit_pass = command_buffer->CreateBlitPass(); - if (!blit_pass) { - decode_error = "Could not create blit pass for mipmap generation."; - return; - } - blit_pass->SetLabel("Mipmap Blit Pass"); - blit_pass->GenerateMipmap(texture); - blit_pass->EncodeCommands(context->GetResourceAllocator()); - if (!context->GetCommandQueue()->Submit({command_buffer}).ok()) { - decode_error = "Failed to submit blit pass command buffer."; - return; - } - command_buffer->WaitUntilScheduled(); - })); - if (decode_error.has_value()) { - FML_DLOG(ERROR) << decode_error.value(); - return std::make_pair(nullptr, decode_error.value()); - } - } - return std::make_pair(impeller::DlImageImpeller::Make(std::move(texture)), std::string()); } @@ -509,14 +535,21 @@ void ImageDecoderImpeller::Decode(fml::RefPtr descriptor, auto upload_texture_and_invoke_result = [result, context, bitmap_result, gpu_disabled_switch]() { - sk_sp image; - std::string decode_error; - std::tie(image, decode_error) = UploadTextureToPrivate( - context, bitmap_result.device_buffer, bitmap_result.image_info, - bitmap_result.sk_bitmap, gpu_disabled_switch); - result(image, decode_error); + UploadTextureToPrivate(result, context, // + bitmap_result.device_buffer, // + bitmap_result.image_info, // + bitmap_result.sk_bitmap, // + bitmap_result.resize_info, // + gpu_disabled_switch // + ); }; - io_runner->PostTask(upload_texture_and_invoke_result); + // The I/O image uploads are not threadsafe on GLES. + if (context->GetBackendType() == + impeller::Context::BackendType::kOpenGLES) { + io_runner->PostTask(upload_texture_and_invoke_result); + } else { + upload_texture_and_invoke_result(); + } }); } diff --git a/lib/ui/painting/image_decoder_impeller.h b/lib/ui/painting/image_decoder_impeller.h index 063327b490d03..1d956a30a936c 100644 --- a/lib/ui/painting/image_decoder_impeller.h +++ b/lib/ui/painting/image_decoder_impeller.h @@ -11,6 +11,7 @@ #include "flutter/lib/ui/painting/image_decoder.h" #include "impeller/core/formats.h" #include "impeller/geometry/size.h" +#include "include/core/SkImageInfo.h" #include "third_party/skia/include/core/SkBitmap.h" namespace impeller { @@ -41,6 +42,7 @@ struct DecompressResult { std::shared_ptr device_buffer; std::shared_ptr sk_bitmap; SkImageInfo image_info; + std::optional resize_info = std::nullopt; std::string decode_error; }; @@ -69,34 +71,30 @@ class ImageDecoderImpeller final : public ImageDecoder { const std::shared_ptr& allocator); /// @brief Create a device private texture from the provided host buffer. - /// This method is only suported on the metal backend. + /// + /// @param result The image result closure that accepts the DlImage and + /// any encoding error messages. /// @param context The Impeller graphics context. /// @param buffer A host buffer containing the image to be uploaded. /// @param image_info Format information about the particular image. /// @param bitmap A bitmap containg the image to be uploaded. /// @param gpu_disabled_switch Whether the GPU is available command encoding. - /// @return A DlImage. 
- static std::pair, std::string> UploadTextureToPrivate( + static void UploadTextureToPrivate( + ImageResult result, const std::shared_ptr& context, const std::shared_ptr& buffer, const SkImageInfo& image_info, const std::shared_ptr& bitmap, + const std::optional& resize_info, const std::shared_ptr& gpu_disabled_switch); - /// @brief Create a host visible texture from the provided bitmap. + /// @brief Create a texture from the provided bitmap. /// @param context The Impeller graphics context. /// @param bitmap A bitmap containg the image to be uploaded. - /// @param create_mips Whether mipmaps should be generated for the given - /// image. - /// @param gpu_disabled_switch Whether the GPU is available for mipmap - /// creation. /// @return A DlImage. static std::pair, std::string> UploadTextureToStorage( const std::shared_ptr& context, - std::shared_ptr bitmap, - const std::shared_ptr& gpu_disabled_switch, - impeller::StorageMode storage_mode, - bool create_mips = true); + std::shared_ptr bitmap); private: using FutureContext = std::shared_future>; @@ -104,6 +102,13 @@ class ImageDecoderImpeller final : public ImageDecoder { const bool supports_wide_gamut_; std::shared_ptr gpu_disabled_switch_; + /// Only call this method if the GPU is available. + static std::pair, std::string> UnsafeUploadTextureToPrivate( + const std::shared_ptr& context, + const std::shared_ptr& buffer, + const SkImageInfo& image_info, + const std::optional& resize_info); + FML_DISALLOW_COPY_AND_ASSIGN(ImageDecoderImpeller); }; diff --git a/lib/ui/painting/image_decoder_unittests.cc b/lib/ui/painting/image_decoder_unittests.cc index 2504a95064b6e..d8f76671ae09b 100644 --- a/lib/ui/painting/image_decoder_unittests.cc +++ b/lib/ui/painting/image_decoder_unittests.cc @@ -76,11 +76,32 @@ class TestImpellerContext : public impeller::Context { return nullptr; } + void StoreTaskForGPU(const std::function& task, + const std::function& failure) override { + tasks_.push_back(PendingTask{task, failure}); + } + + void FlushTasks(bool fail = false) { + for (auto& task : tasks_) { + if (fail) { + task.task(); + } else { + task.failure(); + } + } + tasks_.clear(); + } + void Shutdown() override {} mutable size_t command_buffer_count_ = 0; private: + struct PendingTask { + std::function task; + std::function failure; + }; + std::vector tasks_; std::shared_ptr capabilities_; }; @@ -336,16 +357,66 @@ TEST_F(ImageDecoderFixtureTest, ImpellerUploadToSharedNoGpu) { desc.size = bitmap->computeByteSize(); auto buffer = std::make_shared(desc); - auto result = ImageDecoderImpeller::UploadTextureToPrivate( - no_gpu_access_context, buffer, info, bitmap, gpu_disabled_switch); - ASSERT_EQ(no_gpu_access_context->command_buffer_count_, 0ul); - ASSERT_EQ(result.second, ""); + bool invoked = false; + auto cb = [&invoked](const sk_sp& image, + const std::string& message) { invoked = true; }; + + ImageDecoderImpeller::UploadTextureToPrivate( + cb, no_gpu_access_context, buffer, info, bitmap, std::nullopt, + gpu_disabled_switch); + + EXPECT_EQ(no_gpu_access_context->command_buffer_count_, 0ul); + EXPECT_FALSE(invoked); + + auto result = ImageDecoderImpeller::UploadTextureToStorage( + no_gpu_access_context, bitmap); - result = ImageDecoderImpeller::UploadTextureToStorage( - no_gpu_access_context, bitmap, gpu_disabled_switch, - impeller::StorageMode::kHostVisible, true); ASSERT_EQ(no_gpu_access_context->command_buffer_count_, 0ul); ASSERT_EQ(result.second, ""); + + no_gpu_access_context->FlushTasks(/*fail=*/true); +} + +TEST_F(ImageDecoderFixtureTest, + 
ImpellerUploadToSharedNoGpuTaskFlushingFailure) { +#if !IMPELLER_SUPPORTS_RENDERING + GTEST_SKIP() << "Impeller only test."; +#endif // IMPELLER_SUPPORTS_RENDERING + + auto no_gpu_access_context = + std::make_shared(); + auto gpu_disabled_switch = std::make_shared(true); + + auto info = SkImageInfo::Make(10, 10, SkColorType::kRGBA_8888_SkColorType, + SkAlphaType::kPremul_SkAlphaType); + auto bitmap = std::make_shared(); + bitmap->allocPixels(info, 10 * 4); + impeller::DeviceBufferDescriptor desc; + desc.size = bitmap->computeByteSize(); + auto buffer = std::make_shared(desc); + + sk_sp image; + std::string message; + bool invoked = false; + auto cb = [&invoked, &image, &message](sk_sp p_image, + std::string p_message) { + invoked = true; + image = std::move(p_image); + message = std::move(p_message); + }; + + ImageDecoderImpeller::UploadTextureToPrivate( + cb, no_gpu_access_context, buffer, info, bitmap, std::nullopt, + gpu_disabled_switch); + + EXPECT_EQ(no_gpu_access_context->command_buffer_count_, 0ul); + EXPECT_FALSE(invoked); + + no_gpu_access_context->FlushTasks(/*fail=*/true); + + EXPECT_TRUE(invoked); + // Creation of the dl image will still fail with the mocked context. + EXPECT_NE(message, ""); } TEST_F(ImageDecoderFixtureTest, ImpellerNullColorspace) { @@ -707,8 +778,8 @@ TEST(ImageDecoderTest, VerifySimpleDecoding) { auto data = flutter::testing::OpenFixtureAsSkData("Horizontal.jpg"); auto image = SkImages::DeferredFromEncodedData(data); ASSERT_TRUE(image != nullptr); - ASSERT_EQ(600, image->width()); - ASSERT_EQ(200, image->height()); + EXPECT_EQ(600, image->width()); + EXPECT_EQ(200, image->height()); ImageGeneratorRegistry registry; std::shared_ptr generator = @@ -719,24 +790,36 @@ TEST(ImageDecoderTest, VerifySimpleDecoding) { std::move(generator)); auto compressed_image = ImageDecoderSkia::ImageFromCompressedData( descriptor.get(), 6, 2, fml::tracing::TraceFlow("")); - ASSERT_EQ(compressed_image->width(), 6); - ASSERT_EQ(compressed_image->height(), 2); - ASSERT_EQ(compressed_image->alphaType(), kOpaque_SkAlphaType); + EXPECT_EQ(compressed_image->width(), 6); + EXPECT_EQ(compressed_image->height(), 2); + EXPECT_EQ(compressed_image->alphaType(), kOpaque_SkAlphaType); #if IMPELLER_SUPPORTS_RENDERING + // Bitmap sizes reflect the original image size as resizing is done on the + // GPU if the src size is smaller than the max texture size. std::shared_ptr allocator = std::make_shared(); auto result_1 = ImageDecoderImpeller::DecompressTexture( - descriptor.get(), SkISize::Make(6, 2), {100, 100}, + descriptor.get(), SkISize::Make(6, 2), {1000, 1000}, /*supports_wide_gamut=*/false, allocator); - ASSERT_EQ(result_1.sk_bitmap->width(), 6); - ASSERT_EQ(result_1.sk_bitmap->height(), 2); + EXPECT_EQ(result_1.sk_bitmap->width(), 75); + EXPECT_EQ(result_1.sk_bitmap->height(), 25); + // Bitmap sizes reflect the scaled size if the source size is larger than + // max texture size even if destination size isn't max texture size. auto result_2 = ImageDecoderImpeller::DecompressTexture( + descriptor.get(), SkISize::Make(6, 2), {10, 10}, + /*supports_wide_gamut=*/false, allocator); + EXPECT_EQ(result_2.sk_bitmap->width(), 6); + EXPECT_EQ(result_2.sk_bitmap->height(), 2); + + // If the destination size is larger than the max texture size the image + // is scaled down. 
+ auto result_3 = ImageDecoderImpeller::DecompressTexture( descriptor.get(), SkISize::Make(60, 20), {10, 10}, /*supports_wide_gamut=*/false, allocator); - ASSERT_EQ(result_2.sk_bitmap->width(), 10); - ASSERT_EQ(result_2.sk_bitmap->height(), 10); + EXPECT_EQ(result_3.sk_bitmap->width(), 10); + EXPECT_EQ(result_3.sk_bitmap->height(), 10); #endif // IMPELLER_SUPPORTS_RENDERING } @@ -963,8 +1046,7 @@ TEST_F(ImageDecoderFixtureTest, MultiFrameCodecDidAccessGpuDisabledSyncSwitch) { PostTaskSync(runners.GetIOTaskRunner(), [&]() { io_manager.reset(); }); } -TEST_F(ImageDecoderFixtureTest, - MultiFrameCodecProducesATextureEvenIfGPUIsDisabledOnImpeller) { +TEST_F(ImageDecoderFixtureTest, MultiFrameCodecIsPausedWhenGPUIsUnavailable) { auto settings = CreateSettingsForFixture(); settings.enable_impeller = true; auto vm_ref = DartVMRef::Create(settings); diff --git a/lib/ui/painting/image_encoding_impeller.cc b/lib/ui/painting/image_encoding_impeller.cc index 4f9d7722044f7..1dddb456fac58 100644 --- a/lib/ui/painting/image_encoding_impeller.cc +++ b/lib/ui/painting/image_encoding_impeller.cc @@ -5,6 +5,7 @@ #include "flutter/lib/ui/painting/image_encoding_impeller.h" #include "flutter/lib/ui/painting/image.h" +#include "fml/status.h" #include "impeller/core/device_buffer.h" #include "impeller/core/formats.h" #include "impeller/renderer/command_buffer.h" @@ -90,8 +91,7 @@ void DoConvertImageToRasterImpellerWithRetry( // task on the Context so it can be executed when the GPU becomes available. if (status.code() == fml::StatusCode::kUnavailable) { impeller_context->StoreTaskForGPU( - [dl_image, encode_task = std::move(encode_task), - is_gpu_disabled_sync_switch, impeller_context, + [dl_image, encode_task, is_gpu_disabled_sync_switch, impeller_context, retry_runner]() mutable { auto retry_task = [dl_image, encode_task = std::move(encode_task), is_gpu_disabled_sync_switch, impeller_context] { @@ -111,6 +111,10 @@ void DoConvertImageToRasterImpellerWithRetry( } else { retry_task(); } + }, + [encode_task]() { + encode_task( + fml::Status(fml::StatusCode::kUnavailable, "GPU unavailable.")); }); } else { // Pass on errors that are not `kUnavailable`. diff --git a/lib/ui/painting/image_encoding_unittests.cc b/lib/ui/painting/image_encoding_unittests.cc index 82b6db2eab60a..380647d43d074 100644 --- a/lib/ui/painting/image_encoding_unittests.cc +++ b/lib/ui/painting/image_encoding_unittests.cc @@ -374,8 +374,8 @@ TEST_F(ShellTest, EncodeImageFailsWithoutGPUImpeller) { shell->GetIOManager()->GetImpellerContext(); // This will cause the stored tasks to overflow and start throwing them // away. - for (int i = 0; i < impeller::Context::kMaxTasksAwaitingGPU; ++i) { - impeller_context->StoreTaskForGPU([] {}); + for (int i = 0; i < impeller::Context::kMaxTasksAwaitingGPU; i++) { + impeller_context->StoreTaskForGPU([] {}, [] {}); } }); }; diff --git a/lib/ui/painting/multi_frame_codec.cc b/lib/ui/painting/multi_frame_codec.cc index 2a7e3111f2294..1521c535fd97c 100644 --- a/lib/ui/painting/multi_frame_codec.cc +++ b/lib/ui/painting/multi_frame_codec.cc @@ -147,10 +147,7 @@ MultiFrameCodec::State::GetNextFrameImage( // This is safe regardless of whether the GPU is available or not because // without mipmap creation there is no command buffer encoding done. 
return ImageDecoderImpeller::UploadTextureToStorage( - impeller_context, std::make_shared(bitmap), - std::make_shared(), - impeller::StorageMode::kHostVisible, - /*create_mips=*/false); + impeller_context, std::make_shared(bitmap)); } #endif // IMPELLER_SUPPORTS_RENDERING diff --git a/shell/common/snapshot_controller_impeller.cc b/shell/common/snapshot_controller_impeller.cc index 68bc9ac46a3c8..8aea6ecb85a70 100644 --- a/shell/common/snapshot_controller_impeller.cc +++ b/shell/common/snapshot_controller_impeller.cc @@ -137,10 +137,11 @@ void SnapshotControllerImpeller::MakeRasterSnapshot( if (context) { context->GetContext()->StoreTaskForGPU( [context, sync_switch, display_list = std::move(display_list), - picture_size, callback = std::move(callback)] { + picture_size, callback] { callback(DoMakeRasterSnapshot(display_list, picture_size, sync_switch, context)); - }); + }, + [callback]() { callback(nullptr); }); } else { callback(nullptr); }
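Usage sketch (illustrative only, not part of the diff above): the hunks introduce two API changes that callers interact with directly, BlitPass::ResizeTexture(source, destination) and the two-callback Context::StoreTaskForGPU(task, failure). The code below shows how client code might drive the new resize entry point; it mirrors the CanResizeTextures test added in blit_pass_unittests.cc. DownscaleTexture is a hypothetical helper name, `context` is assumed to be a valid, fully initialized impeller::Context, and the include list is approximate.

  // Minimal sketch under the assumptions above: downscale an existing texture
  // into a new 10x10 device-private texture via BlitPass::ResizeTexture.
  // Per the blit_pass.h documentation above, on Metal the destination must be
  // non-lossy and shader-writable because the resize goes through
  // MPSImageBilinearScale.
  #include <memory>

  #include "impeller/core/formats.h"
  #include "impeller/core/texture_descriptor.h"
  #include "impeller/renderer/command_buffer.h"
  #include "impeller/renderer/context.h"

  bool DownscaleTexture(const std::shared_ptr<impeller::Context>& context,
                        const std::shared_ptr<impeller::Texture>& source) {
    impeller::TextureDescriptor dst_desc;
    dst_desc.storage_mode = impeller::StorageMode::kDevicePrivate;
    dst_desc.format = impeller::PixelFormat::kR8G8B8A8UNormInt;
    dst_desc.size = {10, 10};
    dst_desc.usage = impeller::TextureUsage::kShaderRead |
                     impeller::TextureUsage::kShaderWrite;
    auto destination =
        context->GetResourceAllocator()->CreateTexture(dst_desc);
    if (!destination) {
      return false;
    }

    auto cmd_buffer = context->CreateCommandBuffer();
    auto blit_pass = cmd_buffer->CreateBlitPass();
    // Record the resize, encode the pass, and submit the command buffer.
    if (!blit_pass->ResizeTexture(source, destination)) {
      return false;
    }
    if (!blit_pass->EncodeCommands(context->GetResourceAllocator())) {
      return false;
    }
    return context->GetCommandQueue()->Submit({std::move(cmd_buffer)}).ok();
  }

For StoreTaskForGPU, the new second closure is invoked when a queued task is dropped without the GPU ever becoming available (for example when the pending-task queue overflows), so callers report failure there instead of silently losing their completion callback. A hedged sketch, assuming `respond` is whatever status callback the caller already holds:

  // Both closures capture `respond` by value so it outlives the caller's
  // stack frame, matching the capture-by-value note in
  // image_decoder_impeller.cc above.
  context->StoreTaskForGPU(
      [respond]() { respond(fml::Status()); },  // GPU became available.
      [respond]() {
        respond(fml::Status(fml::StatusCode::kUnavailable,
                            "GPU unavailable."));  // Task was dropped.
      });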