#if defined(GR_TEST_UTILS)

static int num_4x4_blocks(int size) {
    return ((size + 3) & ~3) >> 2;
}

static uint16_t to565(SkColor col) {
    int r5 = SkMulDiv255Round(31, SkColorGetR(col));
    int g6 = SkMulDiv255Round(63, SkColorGetG(col));
    int b5 = SkMulDiv255Round(31, SkColorGetB(col));
    return (r5 << 11) | (g6 << 5) | b5;
}

void GrTwoColorBC1Compress(const SkPixmap& pixmap, SkColor otherColor, char* dstPixels) {
    // ... (block/endpoint setup elided in this excerpt)
    for (int y = 0; y < numYBlocks; ++y) {
        for (int x = 0; x < numXBlocks; ++x) {
            // ... (per-block state elided)
            for (int i = 0; i < 4; ++i) {
                for (int j = 0; j < 4; ++j, shift += 2) {
                    // ... (a 2-bit selector is chosen for each pixel of the 4x4 block; elided)
                }
            }
            dstBlocks[y*numXBlocks + x] = block;
        }
    }
}

#endif  // defined(GR_TEST_UTILS)
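As a quick illustration of what the two helpers above compute, here is a small worked sketch. It is not part of this file, and ExampleBC1Block is a hypothetical stand-in for the BC1Block type that the 4x4 loops and the 2-bit shift imply.

// Illustrative sketch only; ExampleBC1Block is a hypothetical stand-in.
#include <cstdint>

struct ExampleBC1Block {
    uint16_t fColor0;    // first 565 endpoint color
    uint16_t fColor1;    // second 565 endpoint color
    uint32_t fIndices;   // sixteen 2-bit selectors, one per pixel of the 4x4 block
};

// num_4x4_blocks rounds a dimension up to a multiple of 4, then divides by 4:
//   num_4x4_blocks(10) == ((10 + 3) & ~3) >> 2 == 12 >> 2 == 3
//
// to565 packs 5/6/5-bit channels into a single 16-bit value:
//   to565(SK_ColorWHITE) == (31 << 11) | (63 << 5) | 31 == 0xFFFF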
size_t GrComputeTightCombinedBufferSize(size_t bytesPerPixel, SkISize baseDimensions,
                                        TArray<size_t>* individualMipOffsets, int mipLevelCount) {
    SkASSERT(individualMipOffsets && individualMipOffsets->empty());
    // ... (the base level's offset of 0 is recorded here; elided in this excerpt)

    size_t combinedBufferSize = baseDimensions.width() * bytesPerPixel * baseDimensions.height();
    SkISize levelDimensions = baseDimensions;

    // Only these per-pixel sizes are supported; they determine the offset alignment below.
    SkASSERT(bytesPerPixel == 1 || bytesPerPixel == 2 || bytesPerPixel == 3 ||
             bytesPerPixel == 4 || bytesPerPixel == 8 || bytesPerPixel == 16);
    int desiredAlignment = (bytesPerPixel == 3) ? 12 : (bytesPerPixel > 4 ? bytesPerPixel : 4);

    for (int currentMipLevel = 1; currentMipLevel < mipLevelCount; ++currentMipLevel) {
        // ... (levelDimensions is reduced to this mip level's dimensions; elided)
        size_t trimmedSize = levelDimensions.area() * bytesPerPixel;

        // Round the running size up to the next aligned offset before recording it.
        const size_t alignmentDiff = combinedBufferSize % desiredAlignment;
        if (alignmentDiff != 0) {
            combinedBufferSize += desiredAlignment - alignmentDiff;
        }
        SkASSERT((0 == combinedBufferSize % 4) && (0 == combinedBufferSize % bytesPerPixel));

        individualMipOffsets->push_back(combinedBufferSize);
        combinedBufferSize += trimmedSize;
    }

    SkASSERT(individualMipOffsets->size() == mipLevelCount);
    return combinedBufferSize;
}
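A usage sketch of the function above, walking the arithmetic for a 5x5, 2-byte-per-pixel image with three mip levels. It assumes the elided prologue records offset 0 for the base level, that each mip level halves the previous dimensions, and that TArray is the skia_private container named in the signature.

// Illustrative sketch only; the skia_private namespace is an assumption.
skia_private::TArray<size_t> offsets;
size_t total = GrComputeTightCombinedBufferSize(/*bytesPerPixel=*/2,
                                                /*baseDimensions=*/{5, 5},
                                                &offsets,
                                                /*mipLevelCount=*/3);
// Level 0: 5 * 2 * 5 = 50 bytes at offset 0 (recorded by the elided prologue).
// Level 1: 50 is rounded up to the 4-byte boundary 52, so the 2x2 level
//          (8 bytes) starts at offset 52 and the running size becomes 60.
// Level 2: 60 is already aligned, so the 1x1 level (2 bytes) starts at 60.
// Result: total == 62 and offsets == {0, 52, 60}.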
static skgpu::Swizzle get_load_and_src_swizzle(GrColorType ct, SkRasterPipelineOp* load,
                                               bool* isNormalized, bool* isSRGB) {
    // ... (swizzle and sRGB defaults elided in this excerpt)
    *isNormalized = true;
    switch (ct) {
        // ... (case labels elided; only the visible load-op selections are shown)
        *load = SkRasterPipelineOp::load_565;   break;
        // ...
        *load = SkRasterPipelineOp::load_4444;  break;
        // ...
        *load = SkRasterPipelineOp::load_4444;  break;
        // ... (color types whose channels are not normalized clear the flag)
        *isNormalized = false;
        // ...
        *isNormalized = false;
        // ...
        *isNormalized = false;
        // ...
    }
    // ... (returns the source swizzle for this color type; elided)
}
static skgpu::Swizzle get_dst_swizzle_and_store(GrColorType ct, SkRasterPipelineOp* store,
                                                LumMode* lumMode, bool* isNormalized,
                                                bool* isSRGB) {
    // ... (swizzle, lumMode, and sRGB defaults elided in this excerpt)
    *isNormalized = true;
    switch (ct) {
        // ... (case labels elided; only the visible store-op selections are shown)
        *store = SkRasterPipelineOp::store_565;      break;
        // ...
        *store = SkRasterPipelineOp::store_4444;     break;
        // ...
        *store = SkRasterPipelineOp::store_4444;     break;
        // ...
        *store = SkRasterPipelineOp::store_1010102;
        // ... (color types whose channels are not normalized clear the flag)
        *isNormalized = false;
        // ...
        *isNormalized = false;
        // ...
        *isNormalized = false;
        // ...
        *isNormalized = false;
        // ...
        *store = SkRasterPipelineOp::store_8888;
        // ...
        *store = SkRasterPipelineOp::store_8888;
        // ...
        *store = SkRasterPipelineOp::store_8888;
        // ...
        *store = SkRasterPipelineOp::store_a8;
        // ...
        *store = SkRasterPipelineOp::store_a16;
        // ...
        *store = SkRasterPipelineOp::store_af16;
        // ...
        *store = SkRasterPipelineOp::store_af16;
        // ...
        *store = SkRasterPipelineOp::store_a8;
        // ...
        *store = SkRasterPipelineOp::store_rg88;
        // ...
        *store = SkRasterPipelineOp::store_8888;
        // ...
    }
    // ... (returns the destination swizzle for this color type; elided)
}
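To make the role of the two helpers concrete, here is a minimal standalone sketch that composes a load op, a swizzle, and a store op into a pipeline, the same three ingredients they select per color type. The header paths and the SkRasterPipeline_<256> / SkRasterPipeline_MemoryCtx usage are assumptions about the surrounding Skia API, mirroring the pattern visible in GrConvertPixels below.

// Illustrative sketch only: convert one RGBA_8888 pixel to 565.
#include "src/core/SkRasterPipeline.h"   // header paths are assumptions
#include "src/gpu/Swizzle.h"
#include <cstdint>

void convert_one_pixel() {
    uint32_t srcPixel = 0xFF0000FF;   // one RGBA_8888 pixel: opaque red (little-endian)
    uint16_t dstPixel = 0;            // one 565 destination pixel

    SkRasterPipeline_MemoryCtx srcCtx{&srcPixel, /*stride=*/1};
    SkRasterPipeline_MemoryCtx dstCtx{&dstPixel, /*stride=*/1};

    SkRasterPipeline_<256> pipeline;
    pipeline.append(SkRasterPipelineOp::load_8888, &srcCtx);
    skgpu::Swizzle("rgba").apply(&pipeline);          // identity swizzle for this pair
    pipeline.append(SkRasterPipelineOp::store_565, &dstCtx);
    pipeline.run(0, 0, /*w=*/1, /*h=*/1);
}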
bool GrConvertPixels(const GrPixmap& dst, const GrCPixmap& src, bool flipY) {
    // ... (tracing elided in this excerpt)
    if (src.dimensions().isEmpty() || dst.dimensions().isEmpty()) {
        // ...
    }
    // ...
    if (!src.hasPixels() || !dst.hasPixels()) {
        // ...
    }
    if (dst.dimensions() != src.dimensions()) {
        // ...
    }

    // A 3-byte-per-pixel destination is first produced into a temporary 4-byte-per-pixel
    // pixmap ('temp'), then packed down to 3 bytes per pixel here.
    // ...
    auto* tRow = reinterpret_cast<const char*>(temp.addr());
    auto* dRow = reinterpret_cast<char*>(dst.addr());
    for (int y = 0; y < dst.height(); ++y, tRow += temp.rowBytes(), dRow += dst.rowBytes()) {
        for (int x = 0; x < dst.width(); ++x) {
            auto t = tRow + x*sizeof(uint32_t);
            // ... (copy the three color bytes into the destination row)
        }
    }

    // A 3-byte-per-pixel source is expanded into a temporary 4-byte-per-pixel pixmap
    // with opaque alpha before the general conversion runs.
    // ...
    auto* sRow = reinterpret_cast<const char*>(src.addr());
    auto* tRow = reinterpret_cast<char*>(temp.addr());
    for (int y = 0; y < src.height(); ++y, sRow += src.rowBytes(), tRow += temp.rowBytes()) {
        for (int x = 0; x < src.width(); ++x) {
            // ...
            auto t = tRow + x*sizeof(uint32_t);
            // ...
            t[3] = static_cast<char>(0xFF);
        }
    }

    // ...
    size_t srcBpp = src.info().bpp();
    size_t dstBpp = dst.info().bpp();
    // ...
    bool alphaOrCSConversion =
            /* ... true when a premul/unpremul step or a color-space conversion is needed ... */;

    // Fast path: same color type and no conversion means rows can simply be copied.
    if (src.colorType() == dst.colorType() && !alphaOrCSConversion) {
        size_t tightRB = dstBpp * dst.width();
        if (flipY) {
            auto s = static_cast<const char*>(src.addr());
            auto d = SkTAddOffset<char>(dst.addr(), dst.rowBytes()*(dst.height() - 1));
            for (int y = 0; y < dst.height(); ++y, d -= dst.rowBytes(), s += src.rowBytes()) {
                memcpy(d, s, tightRB);
            }
        } else {
            // ... (SkRectMemcpy of src.addr(), src.rowBytes(), tightRB, src.height())
        }
        // ...
    }

    // General path: build a raster pipeline from a load op, optional conversion stages,
    // and a store op.
    bool srcIsNormalized;
    // ...
    bool dstIsNormalized;
    // ... (get_load_and_src_swizzle / get_dst_swizzle_and_store are consulted here)
    if (alphaOrCSConversion) {
        steps.init(src.colorSpace(), src.alphaType(), dst.colorSpace(), dst.alphaType());
    }
    // ...
    SkRasterPipeline_MemoryCtx
            srcCtx{const_cast<void*>(src.addr()), SkToInt(src.rowBytes()/srcBpp)},
            dstCtx{/* ... destination pixels and stride, elided ... */};
    // ...
    if (flipY) {
        // Read the source bottom-up: start at the last row and run one row per pass.
        srcCtx.pixels = static_cast<char*>(srcCtx.pixels) + src.rowBytes()*(height - 1);
        // ...
    }

    bool hasConversion = alphaOrCSConversion || lumMode != LumMode::kNone;

    if (srcIsSRGB && dstIsSRGB && !hasConversion) {
        // The sRGB decode and re-encode would cancel out, so skip both.
        srcIsSRGB = dstIsSRGB = false;
    }
    hasConversion = hasConversion || srcIsSRGB || dstIsSRGB;

    // ... (pipeline construction and the load op elided)
    loadSwizzle.apply(&pipeline);
    // ...
    if (alphaOrCSConversion) {
        steps->apply(&pipeline);
    }
    // ... (one of the following is appended depending on lumMode)
    pipeline.append(SkRasterPipelineOp::bt709_luminance_or_luma_to_rgb);
    // ...
    pipeline.append(SkRasterPipelineOp::bt709_luminance_or_luma_to_alpha);
    // ...
    storeSwizzle.apply(&pipeline);
    // ... (without a conversion, a single concatenated load/store swizzle is applied instead)
    loadStoreSwizzle.apply(&pipeline);
    // ... (store op appended; elided)
    auto pipelineFn = pipeline.compile();
    for (int i = 0; i < cnt; ++i) {
        // ... (run one pass, then advance the contexts for the next row when flipping)
        srcCtx.pixels = static_cast<char*>(srcCtx.pixels) - src.rowBytes();
        dstCtx.pixels = static_cast<char*>(dstCtx.pixels) + dst.rowBytes();
    }
    // ...
}
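The matching-format fast path above either copies rows top-to-bottom with SkRectMemcpy or, when flipY is set, walks the destination from its last row upward. The standalone sketch below (plain C++, not part of this file) shows that row-reversing copy on raw buffers.

// Illustrative sketch only: copy 'h' rows of 'tightRB' bytes each while
// flipping vertically, mirroring the flipY branch of the fast path.
#include <cstddef>
#include <cstring>

static void copy_rows_flipped(void* dst, size_t dstRB,
                              const void* src, size_t srcRB,
                              size_t tightRB, int h) {
    auto* s = static_cast<const char*>(src);
    auto* d = static_cast<char*>(dst) + dstRB * (h - 1);   // last destination row
    for (int y = 0; y < h; ++y, d -= dstRB, s += srcRB) {
        memcpy(d, s, tightRB);
    }
}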
bool GrClearImage(const GrImageInfo& dstInfo, void* dst, size_t dstRB, std::array<float, 4> color) {
    // ... (validation and conversion of 'color' for the 3-byte-per-pixel case elided)
    for (int y = 0; y < dstInfo.height(); ++y) {
        char* d = static_cast<char*>(dst) + y * dstRB;
        for (int x = 0; x < dstInfo.width(); ++x, d += 3) {
            // ... (write the three packed color bytes)
        }
    }
    // ...

    // General path: build a raster pipeline that stores a constant color.
    skgpu::Swizzle storeSwizzle = get_dst_swizzle_and_store(/* ... */,
                                                            &isNormalized, &dstIsSRGB);
    // ... (the clear color is appended as a constant; one of the following is
    //      appended depending on the luminance mode)
    pipeline.append(SkRasterPipelineOp::bt709_luminance_or_luma_to_rgb);
    // ...
    pipeline.append(SkRasterPipelineOp::bt709_luminance_or_luma_to_alpha);
    // ...
    storeSwizzle.apply(&pipeline);
    // ... (store op appended and the pipeline run over the destination; elided)
}
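The 3-byte-per-pixel branch of GrClearImage above writes the packed color one pixel at a time. A minimal standalone version of that loop (plain C++; the float-to-byte rounding stands in for the elided setup) looks like this.

// Illustrative sketch only: clear an interleaved RGB (3 bytes per pixel)
// surface to a constant color, mirroring the nested loops above.
#include <array>
#include <cstddef>
#include <cstdint>

static void clear_rgb888(void* dst, size_t dstRB, int width, int height,
                         std::array<float, 4> color) {
    // Assumed conversion of the normalized color to bytes (elided in the excerpt).
    auto toByte = [](float c) { return static_cast<uint8_t>(c * 255.f + 0.5f); };
    uint8_t rgb[3] = {toByte(color[0]), toByte(color[1]), toByte(color[2])};
    for (int y = 0; y < height; ++y) {
        char* d = static_cast<char*>(dst) + y * dstRB;
        for (int x = 0; x < width; ++x, d += 3) {
            d[0] = static_cast<char>(rgb[0]);
            d[1] = static_cast<char>(rgb[1]);
            d[2] = static_cast<char>(rgb[2]);
        }
    }
}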