Flutter Engine
 
Loading...
Searching...
No Matches
image_decoder_impeller.cc
Go to the documentation of this file.
1// Copyright 2013 The Flutter Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#include <format>
8#include <memory>
9
10#include "flutter/fml/closure.h"
22#include "third_party/skia/include/core/SkAlphaType.h"
23#include "third_party/skia/include/core/SkBitmap.h"
24#include "third_party/skia/include/core/SkColorSpace.h"
25#include "third_party/skia/include/core/SkColorType.h"
26#include "third_party/skia/include/core/SkImageInfo.h"
27#include "third_party/skia/include/core/SkMallocPixelRef.h"
28#include "third_party/skia/include/core/SkPixelRef.h"
29#include "third_party/skia/include/core/SkPixmap.h"
30#include "third_party/skia/include/core/SkPoint.h"
31#include "third_party/skia/include/core/SkSize.h"
32
33namespace flutter {
34
35namespace {
36/**
37 * Loads the gamut as a set of three points (triangle).
38 */
39void LoadGamut(SkPoint abc[3], const skcms_Matrix3x3& xyz) {
40 // rx = rX / (rX + rY + rZ)
41 // ry = rY / (rX + rY + rZ)
42 // gx, gy, bx, and gy are calculated similarly.
43 for (int index = 0; index < 3; index++) {
44 float sum = xyz.vals[index][0] + xyz.vals[index][1] + xyz.vals[index][2];
45 abc[index].fX = xyz.vals[index][0] / sum;
46 abc[index].fY = xyz.vals[index][1] / sum;
47 }
48}
49
50/**
51 * Calculates the area of the triangular gamut.
52 */
53float CalculateArea(SkPoint abc[3]) {
54 const SkPoint& a = abc[0];
55 const SkPoint& b = abc[1];
56 const SkPoint& c = abc[2];
57 return 0.5f * fabsf(a.fX * b.fY + b.fX * c.fY - a.fX * c.fY - c.fX * b.fY -
58 b.fX * a.fY);
59}
60
61// Note: This was calculated from SkColorSpace::MakeSRGB().
62static constexpr float kSrgbGamutArea = 0.0982f;
63
64// Source:
65// https://source.chromium.org/chromium/_/skia/skia.git/+/393fb1ec80f41d8ad7d104921b6920e69749fda1:src/codec/SkAndroidCodec.cpp;l=67;drc=46572b4d445f41943059d0e377afc6d6748cd5ca;bpv=1;bpt=0
66bool IsWideGamut(const SkColorSpace* color_space) {
67 if (!color_space) {
68 return false;
69 }
70 skcms_Matrix3x3 xyzd50;
71 color_space->toXYZD50(&xyzd50);
72 SkPoint rgb[3];
73 LoadGamut(rgb, xyzd50);
74 float area = CalculateArea(rgb);
75 return area > kSrgbGamutArea;
76}
77
// Maps an SkColorType to the Impeller pixel format used for texture uploads,
// or std::nullopt when the color type is unsupported.
// NOTE(review): the per-case `return` lines (original lines 81/83/85/87) are
// elided by this listing; as rendered, every case would fall through to the
// default. The code below is kept verbatim — restore from upstream before
// editing.
78static std::optional<impeller::PixelFormat> ToPixelFormat(SkColorType type) {
79 switch (type) {
80 case kRGBA_8888_SkColorType:
82 case kBGRA_8888_SkColorType:
84 case kRGBA_F16_SkColorType:
86 case kBGR_101010x_XR_SkColorType:
88 default:
89 return std::nullopt;
90 }
91 return std::nullopt;
92}
93} // namespace
94
// ImageDecoderImpeller constructor.
// NOTE(review): the signature line (original line 95) and the task-posting
// call that runs the lambda below (original line 106) are elided by this
// listing; code is kept verbatim.
//
// Forwards the runners/IO manager to the ImageDecoder base, records the
// wide-gamut flag and GPU-disabled switch, and resolves `context_` (a
// future) asynchronously: the posted lambda fulfills the promise with the
// IO manager's Impeller context, or nullptr if the IO manager is gone by
// the time it runs.
96 const TaskRunners& runners,
97 std::shared_ptr<fml::ConcurrentTaskRunner> concurrent_task_runner,
98 const fml::WeakPtr<IOManager>& io_manager,
99 bool wide_gamut_enabled,
100 const std::shared_ptr<fml::SyncSwitch>& gpu_disabled_switch)
101 : ImageDecoder(runners, std::move(concurrent_task_runner), io_manager),
102 wide_gamut_enabled_(wide_gamut_enabled),
103 gpu_disabled_switch_(gpu_disabled_switch) {
104 std::promise<std::shared_ptr<impeller::Context>> context_promise;
105 context_ = context_promise.get_future();
107 [promise = std::move(context_promise), io_manager]() mutable {
108 if (io_manager) {
109 promise.set_value(io_manager->GetImpellerContext());
110 } else {
111 // No IO manager: resolve with a null context so waiters unblock.
112 promise.set_value(nullptr);
113 }
114 }));
115}
115
117
118static SkColorType ChooseCompatibleColorType(SkColorType type) {
119 switch (type) {
120 case kRGBA_F32_SkColorType:
121 return kRGBA_F16_SkColorType;
122 default:
123 return kRGBA_8888_SkColorType;
124 }
125}
126
127static SkAlphaType ChooseCompatibleAlphaType(SkAlphaType type) {
128 return type;
129}
130
// ImageDecoderImpeller::DecompressTexture.
// NOTE(review): the signature line (original line 131) is elided by this
// listing; per the declaration it is a static member returning a
// DecompressResult. Code below is kept verbatim.
//
// Decodes (or converts) the image described by |descriptor| into an
// Impeller-visible device buffer:
//  * clamps |target_size| to |max_texture_size|;
//  * picks a wide-gamut pixel format only when |supports_wide_gamut| and the
//    source color space is actually wide gamut;
//  * premultiplies unpremultiplied alpha via a second allocation;
//  * falls back to a slow CPU resize only when the source exceeds the max
//    texture size or the device cannot blit texture-to-texture — otherwise
//    a pending GPU resize is recorded in `resize_info`.
// On any failure a DecompressResult carrying only `decode_error` is
// returned.
132 ImageDescriptor* descriptor,
133 SkISize target_size,
134 impeller::ISize max_texture_size,
135 bool supports_wide_gamut,
136 const std::shared_ptr<const impeller::Capabilities>& capabilities,
137 const std::shared_ptr<impeller::Allocator>& allocator) {
138 TRACE_EVENT0("impeller", __FUNCTION__);
139 if (!descriptor) {
140 std::string decode_error("Invalid descriptor (should never happen)");
141 FML_DLOG(ERROR) << decode_error;
142 return DecompressResult{.decode_error = decode_error};
143 }
144
// Never ask for more pixels than the device can hold in one texture.
145 target_size.set(std::min(static_cast<int32_t>(max_texture_size.width),
146 target_size.width()),
147 std::min(static_cast<int32_t>(max_texture_size.height),
148 target_size.height()));
149
150 const SkISize source_size = descriptor->image_info().dimensions();
151 auto decode_size = source_size;
152 if (descriptor->is_compressed()) {
// Let the codec decode straight to the closest size it supports natively,
// using the larger of the two scale factors so neither axis undershoots.
153 decode_size = descriptor->get_scaled_dimensions(std::max(
154 static_cast<float>(target_size.width()) / source_size.width(),
155 static_cast<float>(target_size.height()) / source_size.height()));
156 }
157
158 //----------------------------------------------------------------------------
159 /// 1. Decode the image.
160 ///
161
162 const auto base_image_info = descriptor->image_info();
163 const bool is_wide_gamut =
164 supports_wide_gamut ? IsWideGamut(base_image_info.colorSpace()) : false;
165 SkAlphaType alpha_type =
166 ChooseCompatibleAlphaType(base_image_info.alphaType());
167 SkImageInfo image_info;
168 if (is_wide_gamut) {
// Opaque wide-gamut content fits in 10-bit XR; anything with alpha needs F16.
169 SkColorType color_type = alpha_type == SkAlphaType::kOpaque_SkAlphaType
170 ? kBGR_101010x_XR_SkColorType
171 : kRGBA_F16_SkColorType;
172 image_info =
173 base_image_info.makeWH(decode_size.width(), decode_size.height())
174 .makeColorType(color_type)
175 .makeAlphaType(alpha_type)
176 .makeColorSpace(SkColorSpace::MakeSRGB());
177 } else {
178 image_info =
179 base_image_info.makeWH(decode_size.width(), decode_size.height())
180 .makeColorType(
181 ChooseCompatibleColorType(base_image_info.colorType()))
182 .makeAlphaType(alpha_type)
183 .makeColorSpace(SkColorSpace::MakeSRGB());
184 }
185
186 const auto pixel_format = ToPixelFormat(image_info.colorType());
187 if (!pixel_format.has_value()) {
188 std::string decode_error(
189 std::format("Codec pixel format is not supported (SkColorType={})",
190 static_cast<int>(image_info.colorType())));
191 FML_DLOG(ERROR) << decode_error;
192 return DecompressResult{.decode_error = decode_error};
193 }
194
// The custom allocator places the bitmap's pixels directly in an
// Impeller device buffer, avoiding an extra staging copy.
195 auto bitmap = std::make_shared<SkBitmap>();
196 bitmap->setInfo(image_info);
197 auto bitmap_allocator = std::make_shared<ImpellerAllocator>(allocator);
198
199 if (descriptor->is_compressed()) {
200 if (!bitmap->tryAllocPixels(bitmap_allocator.get())) {
201 std::string decode_error(
202 "Could not allocate intermediate for image decompression.");
203 FML_DLOG(ERROR) << decode_error;
204 return DecompressResult{.decode_error = decode_error};
205 }
206 // Decode the image into the image generator's closest supported size.
207 if (!descriptor->get_pixels(bitmap->pixmap())) {
208 std::string decode_error("Could not decompress image.");
209 FML_DLOG(ERROR) << decode_error;
210 return DecompressResult{.decode_error = decode_error};
211 }
212 } else {
// Uncompressed input: wrap the descriptor's existing pixels in a temporary
// bitmap and convert them into the target format via readPixels().
213 auto temp_bitmap = std::make_shared<SkBitmap>();
214 temp_bitmap->setInfo(base_image_info);
215 auto pixel_ref = SkMallocPixelRef::MakeWithData(
216 base_image_info, descriptor->row_bytes(), descriptor->data());
217 temp_bitmap->setPixelRef(pixel_ref, 0, 0);
218
219 if (!bitmap->tryAllocPixels(bitmap_allocator.get())) {
220 std::string decode_error(
221 "Could not allocate intermediate for pixel conversion.");
222 FML_DLOG(ERROR) << decode_error;
223 return DecompressResult{.decode_error = decode_error};
224 }
225 temp_bitmap->readPixels(bitmap->pixmap());
226 bitmap->setImmutable();
227 }
228
229 // If the image is unpremultiplied, fix it.
230 if (alpha_type == SkAlphaType::kUnpremul_SkAlphaType) {
231 // Single copy of ImpellerAllocator crashes.
232 auto premul_allocator = std::make_shared<ImpellerAllocator>(allocator);
233 auto premul_bitmap = std::make_shared<SkBitmap>();
234 premul_bitmap->setInfo(bitmap->info().makeAlphaType(kPremul_SkAlphaType));
235 if (!premul_bitmap->tryAllocPixels(premul_allocator.get())) {
236 std::string decode_error(
237 "Could not allocate intermediate for premultiplication conversion.");
238 FML_DLOG(ERROR) << decode_error;
239 return DecompressResult{.decode_error = decode_error};
240 }
241 // readPixels() handles converting pixels to premultiplied form.
242 bitmap->readPixels(premul_bitmap->pixmap());
243 premul_bitmap->setImmutable();
244 bitmap_allocator = premul_allocator;
245 bitmap = premul_bitmap;
246 }
247
248 std::shared_ptr<impeller::DeviceBuffer> buffer =
249 bitmap_allocator->GetDeviceBuffer();
250 if (!buffer) {
251 return DecompressResult{.decode_error = "Unable to get device buffer"};
252 }
253 buffer->Flush();
254
// When the decoded dimensions still differ from the target, record the
// desired final image info so the upload path can resize on the GPU.
255 std::optional<SkImageInfo> resize_info =
256 bitmap->dimensions() == target_size
257 ? std::nullopt
258 : std::optional<SkImageInfo>(image_info.makeDimensions(target_size));
259
260 if (source_size.width() > max_texture_size.width ||
261 source_size.height() > max_texture_size.height ||
262 !capabilities->SupportsTextureToTextureBlits()) {
263 //----------------------------------------------------------------------------
264 /// 2. If the decoded image isn't the requested target size and the src size
265 /// exceeds the device max texture size, perform a slow CPU resize.
266 ///
267 TRACE_EVENT0("impeller", "SlowCPUDecodeScale");
268 const auto scaled_image_info = image_info.makeDimensions(target_size);
269
270 auto scaled_bitmap = std::make_shared<SkBitmap>();
271 auto scaled_allocator = std::make_shared<ImpellerAllocator>(allocator);
272 scaled_bitmap->setInfo(scaled_image_info);
273 if (!scaled_bitmap->tryAllocPixels(scaled_allocator.get())) {
274 std::string decode_error(
275 "Could not allocate scaled bitmap for image decompression.");
276 FML_DLOG(ERROR) << decode_error;
277 return DecompressResult{.decode_error = decode_error};
278 }
279 if (!bitmap->pixmap().scalePixels(
280 scaled_bitmap->pixmap(),
281 SkSamplingOptions(SkFilterMode::kLinear, SkMipmapMode::kNone))) {
// Best-effort: a failed scale is logged but not treated as a fatal error.
282 FML_LOG(ERROR) << "Could not scale decoded bitmap data.";
283 }
284 scaled_bitmap->setImmutable();
285
286 std::shared_ptr<impeller::DeviceBuffer> buffer =
287 scaled_allocator->GetDeviceBuffer();
288 if (!buffer) {
289 return DecompressResult{.decode_error = "Unable to get device buffer"};
290 }
291 buffer->Flush();
292
// CPU path already matches the target size, so no resize_info is set.
293 return DecompressResult{.device_buffer = std::move(buffer),
294 .sk_bitmap = scaled_bitmap,
295 .image_info = scaled_bitmap->info()};
296 }
297
298 return DecompressResult{.device_buffer = std::move(buffer),
299 .sk_bitmap = bitmap,
300 .image_info = bitmap->info(),
301 .resize_info = resize_info};
302}
303
304// static
// Uploads a decompressed device buffer into a GPU texture, generating
// mipmaps and optionally resizing on the GPU when |resize_info| is set.
// Returns the resulting DlImage on success, or a null image plus an error
// string. "Unsafe" because it assumes GPU access is currently allowed; the
// caller (UploadTextureToPrivate) gates it on the GPU-disabled switch.
// NOTE(review): this listing elides several original lines (321, 370, 374,
// 377-378, 400) — presumably the storage-mode/usage assignments for the two
// texture descriptors and the submit-callback lambda header. Code kept
// verbatim; restore from upstream before editing.
// static
305std::pair<sk_sp<DlImage>, std::string>
306ImageDecoderImpeller::UnsafeUploadTextureToPrivate(
307 const std::shared_ptr<impeller::Context>& context,
308 const std::shared_ptr<impeller::DeviceBuffer>& buffer,
309 const SkImageInfo& image_info,
310 const std::optional<SkImageInfo>& resize_info) {
311 const auto pixel_format = ToPixelFormat(image_info.colorType());
312 if (!pixel_format) {
313 std::string decode_error(
314 std::format("Unsupported pixel format (SkColorType={})",
315 static_cast<int>(image_info.colorType())));
316 FML_DLOG(ERROR) << decode_error;
317 return std::make_pair(nullptr, decode_error);
318 }
319
320 impeller::TextureDescriptor texture_descriptor;
322 texture_descriptor.format = pixel_format.value();
323 texture_descriptor.size = {image_info.width(), image_info.height()};
324 texture_descriptor.mip_count = texture_descriptor.size.MipCount();
325 if (context->GetBackendType() == impeller::Context::BackendType::kMetal &&
326 resize_info.has_value()) {
327 // The MPS used to resize images on iOS does not require mip generation.
328 // Remove mip count if we are resizing the image on the GPU.
329 texture_descriptor.mip_count = 1;
330 }
331
332 auto dest_texture =
333 context->GetResourceAllocator()->CreateTexture(texture_descriptor);
334 if (!dest_texture) {
335 std::string decode_error("Could not create Impeller texture.");
336 FML_DLOG(ERROR) << decode_error;
337 return std::make_pair(nullptr, decode_error);
338 }
339
340 dest_texture->SetLabel(
341 std::format("ui.Image({})", static_cast<const void*>(dest_texture.get()))
342 .c_str());
343
344 auto command_buffer = context->CreateCommandBuffer();
345 if (!command_buffer) {
346 std::string decode_error(
347 "Could not create command buffer for mipmap generation.");
348 FML_DLOG(ERROR) << decode_error;
349 return std::make_pair(nullptr, decode_error);
350 }
351 command_buffer->SetLabel("Mipmap Command Buffer");
352
353 auto blit_pass = command_buffer->CreateBlitPass();
354 if (!blit_pass) {
355 std::string decode_error(
356 "Could not create blit pass for mipmap generation.");
357 FML_DLOG(ERROR) << decode_error;
358 return std::make_pair(nullptr, decode_error);
359 }
360 blit_pass->SetLabel("Mipmap Blit Pass");
// Copy the staging buffer's pixels into the texture's base mip level.
361 blit_pass->AddCopy(impeller::DeviceBuffer::AsBufferView(buffer),
362 dest_texture);
363 if (texture_descriptor.mip_count > 1) {
364 blit_pass->GenerateMipmap(dest_texture);
365 }
366
// When a deferred resize was recorded, blit into a second, target-sized
// texture and return that instead of the full-size upload.
367 std::shared_ptr<impeller::Texture> result_texture = dest_texture;
368 if (resize_info.has_value()) {
369 impeller::TextureDescriptor resize_desc;
371 resize_desc.format = pixel_format.value();
372 resize_desc.size = {resize_info->width(), resize_info->height()};
373 resize_desc.mip_count = resize_desc.size.MipCount();
375 if (context->GetBackendType() == impeller::Context::BackendType::kMetal) {
376 // Resizing requires a MPS on Metal platforms.
379 }
380 auto resize_texture =
381 context->GetResourceAllocator()->CreateTexture(resize_desc);
382 if (!resize_texture) {
383 std::string decode_error("Could not create resized Impeller texture.");
384 FML_DLOG(ERROR) << decode_error;
385 return std::make_pair(nullptr, decode_error);
386 }
387
388 blit_pass->ResizeTexture(/*source=*/dest_texture,
389 /*destination=*/resize_texture);
390 if (resize_desc.mip_count > 1) {
391 blit_pass->GenerateMipmap(resize_texture);
392 }
393
394 result_texture = std::move(resize_texture);
395 }
396 blit_pass->EncodeCommands();
397 if (!context->GetCommandQueue()
398 ->Submit(
399 {command_buffer},
401 if (status == impeller::CommandBuffer::Status::kError) {
402 FML_LOG(ERROR)
403 << "GPU Error submitting image decoding command buffer.";
404 }
405 },
406 /*block_on_schedule=*/true)
407 .ok()) {
408 std::string decode_error("Failed to submit image decoding command buffer.");
409 FML_DLOG(ERROR) << decode_error;
410 return std::make_pair(nullptr, decode_error);
411 }
412
413 // Flush the pending command buffer to ensure that its output becomes visible
414 // to the raster thread.
415 if (context->AddTrackingFence(result_texture)) {
416 command_buffer->WaitUntilScheduled();
417 } else {
418 command_buffer->WaitUntilCompleted();
419 }
420
421 context->DisposeThreadLocalCachedResources();
422
423 return std::make_pair(
424 impeller::DlImageImpeller::Make(std::move(result_texture)),
425 std::string());
426}
427
// Gatekeeper around UnsafeUploadTextureToPrivate: validates inputs, then
// consults the GPU-disabled sync switch. When the GPU is available the
// upload runs inline; when disabled, the upload is stored as a task to run
// once GPU access returns, with a failure callback if access is lost for
// good. |result| is always invoked exactly once with either the image or an
// error string.
// NOTE(review): original line 447 (presumably `fml::SyncSwitch::Handlers()`
// starting the handler chain) is elided by this listing. Code kept verbatim.
428void ImageDecoderImpeller::UploadTextureToPrivate(
429 ImageResult result,
430 const std::shared_ptr<impeller::Context>& context,
431 const std::shared_ptr<impeller::DeviceBuffer>& buffer,
432 const SkImageInfo& image_info,
433 const std::shared_ptr<SkBitmap>& bitmap,
434 const std::optional<SkImageInfo>& resize_info,
435 const std::shared_ptr<const fml::SyncSwitch>& gpu_disabled_switch) {
436 TRACE_EVENT0("impeller", __FUNCTION__);
437 if (!context) {
438 result(nullptr, "No Impeller context is available");
439 return;
440 }
441 if (!buffer) {
442 result(nullptr, "No Impeller device buffer is available");
443 return;
444 }
445
446 gpu_disabled_switch->Execute(
448 .SetIfFalse([&result, context, buffer, image_info, resize_info] {
449 sk_sp<DlImage> image;
450 std::string decode_error;
// NOTE(review): the doubled `std::tie(...) = std::tie(...) =` below is a
// redundant self-assignment — the inner assignment already stores the
// result; this should be collapsed to a single `std::tie(...) = ...` as in
// the SetIfTrue branch. Behaviorally harmless, left verbatim.
451 std::tie(image, decode_error) = std::tie(image, decode_error) =
452 UnsafeUploadTextureToPrivate(context, buffer, image_info,
453 resize_info);
454 result(image, decode_error);
455 })
456 .SetIfTrue([&result, context, buffer, image_info, resize_info] {
// The stored task may outlive this frame, so move `result` into shared
// ownership reachable from both the success and failure callbacks.
457 auto result_ptr = std::make_shared<ImageResult>(std::move(result));
458 context->StoreTaskForGPU(
459 [result_ptr, context, buffer, image_info, resize_info]() {
460 sk_sp<DlImage> image;
461 std::string decode_error;
462 std::tie(image, decode_error) = UnsafeUploadTextureToPrivate(
463 context, buffer, image_info, resize_info);
464 (*result_ptr)(image, decode_error);
465 },
466 [result_ptr]() {
467 (*result_ptr)(
468 nullptr,
469 "Image upload failed due to loss of GPU access.");
470 });
471 }));
472}
473
// Uploads |bitmap|'s pixels into a host-accessible Impeller texture via
// Texture::SetContents (no command buffer, no mipmaps — mip_count is 1).
// The bitmap is kept alive by the mapping's release proc until the texture
// has consumed the data. Returns the DlImage, or a null image plus an error
// string.
// NOTE(review): original line 496 (presumably the descriptor's storage-mode
// assignment) is elided by this listing. Code kept verbatim.
474std::pair<sk_sp<DlImage>, std::string>
475ImageDecoderImpeller::UploadTextureToStorage(
476 const std::shared_ptr<impeller::Context>& context,
477 std::shared_ptr<SkBitmap> bitmap) {
478 TRACE_EVENT0("impeller", __FUNCTION__);
479 if (!context) {
480 return std::make_pair(nullptr, "No Impeller context is available");
481 }
482 if (!bitmap) {
483 return std::make_pair(nullptr, "No texture bitmap is available");
484 }
485 const auto image_info = bitmap->info();
486 const auto pixel_format = ToPixelFormat(image_info.colorType());
487 if (!pixel_format) {
488 std::string decode_error(
489 std::format("Unsupported pixel format (SkColorType={})",
490 static_cast<int>(image_info.colorType())));
491 FML_DLOG(ERROR) << decode_error;
492 return std::make_pair(nullptr, decode_error);
493 }
494
495 impeller::TextureDescriptor texture_descriptor;
497 texture_descriptor.format = pixel_format.value();
498 texture_descriptor.size = {image_info.width(), image_info.height()};
499 texture_descriptor.mip_count = 1;
500
501 auto texture =
502 context->GetResourceAllocator()->CreateTexture(texture_descriptor);
503 if (!texture) {
504 std::string decode_error("Could not create Impeller texture.");
505 FML_DLOG(ERROR) << decode_error;
506 return std::make_pair(nullptr, decode_error);
507 }
508
// Zero-copy handoff: the mapping points at the bitmap's pixels and the
// release proc drops the captured shared_ptr once the data is consumed.
509 auto mapping = std::make_shared<fml::NonOwnedMapping>(
510 reinterpret_cast<const uint8_t*>(bitmap->getAddr(0, 0)), // data
511 texture_descriptor.GetByteSizeOfBaseMipLevel(), // size
512 [bitmap](auto, auto) mutable { bitmap.reset(); } // proc
513 );
514
515 if (!texture->SetContents(mapping)) {
516 std::string decode_error("Could not copy contents into Impeller texture.");
517 FML_DLOG(ERROR) << decode_error;
518 return std::make_pair(nullptr, decode_error);
519 }
520
521 texture->SetLabel(
522 std::format("ui.Image({})", static_cast<const void*>(texture.get()))
523 .c_str());
524
525 context->DisposeThreadLocalCachedResources();
526
527 return std::make_pair(impeller::DlImageImpeller::Make(std::move(texture)),
528 std::string());
529}
530
531// |ImageDecoder|
// |ImageDecoder|
// Entry point for decoding: wraps |p_result| so completion is always
// delivered on the UI task runner, then decompresses on the concurrent
// runner and hands off to UploadTextureToPrivate. The descriptor is
// manually AddRef'd here and Released inside the wrapped callback so it
// survives the thread hops.
// NOTE(review): original lines 596-597 are elided by this listing — per the
// preceding comment the condition presumably compares against the OpenGLES
// backend type. Code kept verbatim.
532void ImageDecoderImpeller::Decode(fml::RefPtr<ImageDescriptor> descriptor,
533 uint32_t target_width,
534 uint32_t target_height,
535 const ImageResult& p_result) {
536 FML_DCHECK(descriptor);
537 FML_DCHECK(p_result);
538
539 // Wrap the result callback so that it can be invoked from any thread.
540 auto raw_descriptor = descriptor.get();
541 raw_descriptor->AddRef();
542 ImageResult result = [p_result, //
543 raw_descriptor, //
544 ui_runner = runners_.GetUITaskRunner() //
545 ](const auto& image, const auto& decode_error) {
546 ui_runner->PostTask([raw_descriptor, p_result, image, decode_error]() {
547 raw_descriptor->Release();
548 p_result(std::move(image), decode_error);
549 });
550 };
551
552 concurrent_task_runner_->PostTask(
553 [raw_descriptor, //
554 context = context_.get(), //
555 target_size = SkISize::Make(target_width, target_height), //
556 io_runner = runners_.GetIOTaskRunner(), //
557 result,
558 wide_gamut_enabled = wide_gamut_enabled_, //
559 gpu_disabled_switch = gpu_disabled_switch_]() {
560#if FML_OS_IOS_SIMULATOR
561 // No-op backend.
562 if (!context) {
563 return;
564 }
565#endif // FML_OS_IOS_SIMULATOR
566
567 if (!context) {
568 result(nullptr, "No Impeller context is available");
569 return;
570 }
571 auto max_size_supported =
572 context->GetResourceAllocator()->GetMaxTextureSizeSupported();
573
574 // Always decompress on the concurrent runner.
575 auto bitmap_result = DecompressTexture(
576 raw_descriptor, target_size, max_size_supported,
577 /*supports_wide_gamut=*/wide_gamut_enabled &&
578 context->GetCapabilities()->SupportsExtendedRangeFormats(),
579 context->GetCapabilities(), context->GetResourceAllocator());
580 if (!bitmap_result.device_buffer) {
581 result(nullptr, bitmap_result.decode_error);
582 return;
583 }
584
585 auto upload_texture_and_invoke_result = [result, context, bitmap_result,
586 gpu_disabled_switch]() {
587 UploadTextureToPrivate(result, context, //
588 bitmap_result.device_buffer, //
589 bitmap_result.image_info, //
590 bitmap_result.sk_bitmap, //
591 bitmap_result.resize_info, //
592 gpu_disabled_switch //
593 );
594 };
595 // The I/O image uploads are not threadsafe on GLES.
596 if (context->GetBackendType() ==
598 io_runner->PostTask(upload_texture_and_invoke_result);
599 } else {
600 upload_texture_and_invoke_result();
601 }
602 });
603}
604
605ImpellerAllocator::ImpellerAllocator(
606 std::shared_ptr<impeller::Allocator> allocator)
607 : allocator_(std::move(allocator)) {}
608
609std::shared_ptr<impeller::DeviceBuffer> ImpellerAllocator::GetDeviceBuffer()
610 const {
611 return buffer_;
612}
613
// Skia allocator hook: backs |bitmap|'s pixels with a freshly created
// Impeller device buffer (recorded in buffer_ for GetDeviceBuffer()).
// Returns false on an invalid bitmap/geometry or allocation failure.
// NOTE(review): original lines 624-625 are elided by this listing —
// presumably the declaration of `descriptor` (an
// impeller::DeviceBufferDescriptor) plus its storage-mode assignment. Code
// kept verbatim.
614bool ImpellerAllocator::allocPixelRef(SkBitmap* bitmap) {
615 if (!bitmap) {
616 return false;
617 }
618 const SkImageInfo& info = bitmap->info();
619 if (kUnknown_SkColorType == info.colorType() || info.width() < 0 ||
620 info.height() < 0 || !info.validRowBytes(bitmap->rowBytes())) {
621 return false;
622 }
623
// Size covers (height - 1) full rows plus one tightly-packed final row, so
// trailing row padding is not allocated.
626 descriptor.size = ((bitmap->height() - 1) * bitmap->rowBytes()) +
627 (bitmap->width() * bitmap->bytesPerPixel());
628
629 std::shared_ptr<impeller::DeviceBuffer> device_buffer =
630 allocator_->CreateBuffer(descriptor);
631 if (!device_buffer) {
632 return false;
633 }
634
// Minimal SkPixelRef that simply points at the device buffer's contents;
// lifetime of the mapping is managed by buffer_ below.
635 struct ImpellerPixelRef final : public SkPixelRef {
636 ImpellerPixelRef(int w, int h, void* s, size_t r)
637 : SkPixelRef(w, h, s, r) {}
638
639 ~ImpellerPixelRef() override {}
640 };
641
642 auto pixel_ref = sk_sp<SkPixelRef>(
643 new ImpellerPixelRef(info.width(), info.height(),
644 device_buffer->OnGetContents(), bitmap->rowBytes()));
645
646 bitmap->setPixelRef(std::move(pixel_ref), 0, 0);
647 buffer_ = std::move(device_buffer);
648 return true;
649}
650
651} // namespace flutter
GLenum type
std::function< void(sk_sp< DlImage >, std::string)> ImageResult
ImageDecoderImpeller(const TaskRunners &runners, std::shared_ptr< fml::ConcurrentTaskRunner > concurrent_task_runner, const fml::WeakPtr< IOManager > &io_manager, bool supports_wide_gamut, const std::shared_ptr< fml::SyncSwitch > &gpu_disabled_switch)
static DecompressResult DecompressTexture(ImageDescriptor *descriptor, SkISize target_size, impeller::ISize max_texture_size, bool supports_wide_gamut, const std::shared_ptr< const impeller::Capabilities > &capabilities, const std::shared_ptr< impeller::Allocator > &allocator)
Creates an image descriptor for encoded or decoded image data, describing the width,...
const SkImageInfo & image_info() const
The orientation corrected image info for this image.
SkISize get_scaled_dimensions(float scale)
Gets the scaled dimensions of this image, if backed by an ImageGenerator that can perform efficient s...
int row_bytes() const
The byte length of the first row of the image. Defaults to width() * 4.
bool get_pixels(const SkPixmap &pixmap) const
Gets pixels for this image transformed based on the EXIF orientation tag, if applicable.
bool is_compressed() const
Whether this descriptor represents compressed (encoded) data or not.
sk_sp< SkData > data() const
The underlying buffer for this image.
bool allocPixelRef(SkBitmap *bitmap) override
std::shared_ptr< impeller::DeviceBuffer > GetDeviceBuffer() const
fml::RefPtr< fml::TaskRunner > GetIOTaskRunner() const
T * get() const
Definition ref_ptr.h:117
virtual void PostTask(const fml::closure &task) override
static BufferView AsBufferView(std::shared_ptr< DeviceBuffer > buffer)
Create a buffer view of this entire buffer.
static sk_sp< DlImageImpeller > Make(std::shared_ptr< Texture > texture, OwningContext owning_context=OwningContext::kIO)
FlutterVulkanImage * image
#define FML_DLOG(severity)
Definition logging.h:121
#define FML_LOG(severity)
Definition logging.h:101
#define FML_DCHECK(condition)
Definition logging.h:122
FlTexture * texture
static SkAlphaType ChooseCompatibleAlphaType(SkAlphaType type)
static SkColorType ChooseCompatibleColorType(SkColorType type)
DEF_SWITCHES_START aot vmservice shared library Name of the *so containing AOT compiled Dart assets for launching the service isolate vm snapshot The VM snapshot data that will be memory mapped as read only SnapshotAssetPath must be present isolate snapshot The isolate snapshot data that will be memory mapped as read only SnapshotAssetPath must be present cache dir Path to the cache directory This is different from the persistent_cache_path in embedder h
Definition switch_defs.h:54
DEF_SWITCHES_START aot vmservice shared library Name of the *so containing AOT compiled Dart assets for launching the service isolate vm snapshot The VM snapshot data that will be memory mapped as read only SnapshotAssetPath must be present isolate snapshot The isolate snapshot data that will be memory mapped as read only SnapshotAssetPath must be present cache dir Path to the cache directory This is different from the persistent_cache_path in embedder which is used for Skia shader cache icu native lib Path to the library file that exports the ICU data vm service The hostname IP address on which the Dart VM Service should be served If not defaults to or::depending on whether ipv6 is specified disable vm Disable the Dart VM Service The Dart VM Service is never available in release mode Bind to the IPv6 localhost address for the Dart VM Service Ignored if vm service host is set profile Make the profiler discard new samples once the profiler sample buffer is full When this flag is not the profiler sample buffer is used as a ring buffer
Definition switch_defs.h:98
internal::CopyableLambda< T > MakeCopyable(T lambda)
constexpr PixelFormat ToPixelFormat(vk::Format format)
Definition formats_vk.h:183
Definition ref_ptr.h:261
uint32_t color_type
uint32_t alpha_type
std::shared_ptr< impeller::DeviceBuffer > device_buffer
Represents the 2 code paths available when calling |SyncSwitchExecute|.
Definition sync_switch.h:35
Type height
Definition size.h:29
Type width
Definition size.h:28
constexpr size_t MipCount() const
Return the mip count of the texture.
Definition size.h:137
A lightweight object that describes the attributes of a texture that can then used an allocator to cr...
constexpr size_t GetByteSizeOfBaseMipLevel() const
#define TRACE_EVENT0(category_group, name)