Flutter Engine
The Flutter Engine
Loading...
Searching...
No Matches
SkRasterPipelineOpContexts.h
Go to the documentation of this file.
1/*
2 * Copyright 2023 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#ifndef SkRasterPipelineOpContexts_DEFINED
9#define SkRasterPipelineOpContexts_DEFINED
10
11#include <algorithm>
12#include <cstddef>
13#include <cstdint>
14
// Forward declaration only; keeps this header free of an SkSL include dependency.
namespace SkSL { class TraceHook; }
16
17// The largest number of pixels we handle at a time. We have a separate value for the largest number
18// of pixels we handle in the highp pipeline. Many of the context structs in this file are only used
19// by stages that have no lowp implementation. They can therefore use the (smaller) highp value to
20// save memory in the arena.
inline static constexpr int SkRasterPipeline_kMaxStride = 16;        // max pixels per pass (lowp)
inline static constexpr int SkRasterPipeline_kMaxStride_highp = 16;  // max pixels per pass (highp)
23
24// How much space to allocate for each MemoryCtx scratch buffer, as part of tail-pixel handling.
25inline static constexpr size_t SkRasterPipeline_MaxScratchPerPatch =
26 std::max(SkRasterPipeline_kMaxStride_highp * 16, // 16 == largest highp bpp (RGBA_F32)
27 SkRasterPipeline_kMaxStride * 4); // 4 == largest lowp bpp (RGBA_8888)
28
29// These structs hold the context data for many of the Raster Pipeline ops.
34
35// Raster Pipeline typically processes N (4, 8, 16) pixels at a time, in SIMT fashion. If the
36// number of pixels in a row isn't evenly divisible by N, there will be leftover pixels; this is
37// called the "tail". To avoid reading or writing past the end of any source or destination buffers
38// when we reach the tail:
39//
40// 1) Source buffers have their tail contents copied to a scratch buffer that is at least N wide.
41// In practice, each scratch buffer uses SkRasterPipeline_MaxScratchPerPatch bytes.
42// 2) Each MemoryCtx in the pipeline is patched, such that access to them (at the current scanline
43// and x-offset) will land in the scratch buffer.
44// 3) Pipeline is run as normal (with all memory access happening safely in the scratch buffers).
45// 4) Destination buffers have their tail contents copied back from the scratch buffer.
46// 5) Each MemoryCtx is "un-patched".
47//
48// To do all of this, the pipeline creates a MemoryCtxPatch for each unique MemoryCtx referenced by
49// the pipeline.
57
60
61 void* backup; // Remembers context->pixels so we can restore it
63};
64
66 const void* pixels;
67 int stride;
68 float width;
69 float height;
70 float weights[16]; // for bicubic and bicubic_clamp_8888
71 // Controls whether pixel i-1 or i is selected when floating point sample position is exactly i.
72 bool roundDownAtInteger = false;
73};
74
75// State shared by save_xy, accumulate, and bilinear_* / bicubic_*.
89
91 float scale;
92 float invScale; // cache of 1/scale
93 // When in the reflection portion of mirror tiling we need to snap the opposite direction
94 // at integer sample points than when in the forward direction. This controls which way we bias
95 // in the reflection. It should be 1 if SkRasterPipeline_GatherCtx::roundDownAtInteger is true
96 // and otherwise -1.
97 int mirrorBiasDir = -1;
98};
99
102 float limit_x;
103 float limit_y;
104 // These control which edge of the interval is included (i.e. closed interval at 0 or at limit).
105 // They should be set to limit_x and limit_y if SkRasterPipeline_GatherCtx::roundDownAtInteger
106 // is true and otherwise zero.
109};
110
// Forward declaration only; the full enum definition is not needed in this header.
enum class SkPerlinNoiseShaderType;
112
119 const uint8_t* latticeSelector; // [256 values]
120 const uint16_t* noiseData; // [4 channels][256 elements][vector of 2]
121};
122
123// State used by mipmap_linear_*
125 // Original coords, saved before the base level logic
128
129 // Base level color
134
135 // Scale factors to transform base level coords to lower level coords
136 float scaleX;
137 float scaleY;
138
140};
141
146
149 int active_pixels /*<= SkRasterPipeline_kMaxStride_highp*/);
150
151 // When called, fn() will have our active pixels available in rgba.
152 // When fn() returns, the pipeline will read back those active pixels from read_from.
154 float* read_from = rgba;
155};
156
157// state shared by stack_checkpoint and stack_rewind
159
172
174 size_t stopCount;
175 float* fs[4];
176 float* bs[4];
177 float* ts;
178};
179
184
190
192 float r,g,b,a;
193 uint16_t rgba[4]; // [0,255] in a 16-bit lane.
194};
195
200
202 const uint8_t *r, *g, *b, *a;
203};
204
// 32-bit offset type used by several of the op-context structs below (e.g. their `offset` members).
using SkRPOffset = uint32_t;
206
210
215
217 int32_t* dst;
218 const int32_t* src;
219};
220
225
230
235
237 // If we are processing more than 16 pixels at a time, an 8-bit offset won't be sufficient and
238 // `offsets` will need to use uint16_t (or dial down the premultiplication).
239 static_assert(SkRasterPipeline_kMaxStride_highp <= 16);
240
242 uint8_t offsets[4]; // values must be byte offsets (4 * highp-stride * component-index)
243};
244
246 int32_t* ptr;
247 int count;
248 uint16_t offsets[16]; // values must be byte offsets (4 * highp-stride * component-index)
249};
250
252 int32_t* dst;
253 const int32_t* src; // src values must _not_ overlap dst values
254 uint16_t offsets[4]; // values must be byte offsets (4 * highp-stride * component-index)
255};
256
258 int32_t* dst;
259 const int32_t* src;
260 const uint32_t *indirectOffset; // this applies to `src` or `dst` based on the op
261 uint32_t indirectLimit; // the indirect offset is clamped to this upper bound
262 uint32_t slots; // the number of slots to copy
263};
264
266 uint16_t offsets[4]; // values must be byte offsets (4 * highp-stride * component-index)
267};
268
270 int offset; // contains the label ID during compilation, and the program offset when compiled
271};
272
274 uint8_t* tail = nullptr; // lanes past the tail are _never_ active, so we need to exclude them
275};
276
281
284 SkRPOffset offset; // points to a pair of adjacent I32s: {I32 actualValue, I32 defaultMask}
285};
286
292
298
304
306 const int* traceMask;
309 const int* data;
310 const uint32_t *indirectOffset; // can be null; if set, an offset applied to `data`
311 uint32_t indirectLimit; // the indirect offset is clamped to this upper bound
312};
313
314#endif // SkRasterPipelineOpContexts_DEFINED
SkPerlinNoiseShaderType
static constexpr int SkRasterPipeline_kMaxStride
static constexpr int SkRasterPipeline_kMaxStride_highp
uint32_t SkRPOffset
static constexpr size_t SkRasterPipeline_MaxScratchPerPatch
uint32_t fMask[SkRasterPipeline_kMaxStride_highp]
float rgba[4 *SkRasterPipeline_kMaxStride_highp]
void(* fn)(SkRasterPipeline_CallbackCtx *self, int active_pixels)
uint32_t mask[SkRasterPipeline_kMaxStride]
SkRasterPipeline_MemoryCtx * context
std::byte scratch[SkRasterPipeline_MaxScratchPerPatch]
SkRasterPipeline_MemoryCtxInfo info
float x[SkRasterPipeline_kMaxStride_highp]
float g[SkRasterPipeline_kMaxStride_highp]
float r[SkRasterPipeline_kMaxStride_highp]
float b[SkRasterPipeline_kMaxStride_highp]
float y[SkRasterPipeline_kMaxStride_highp]
float a[SkRasterPipeline_kMaxStride_highp]
float dg[SkRasterPipeline_kMaxStride_highp]
float g[SkRasterPipeline_kMaxStride_highp]
float dr[SkRasterPipeline_kMaxStride_highp]
float db[SkRasterPipeline_kMaxStride_highp]
float a[SkRasterPipeline_kMaxStride_highp]
float r[SkRasterPipeline_kMaxStride_highp]
float da[SkRasterPipeline_kMaxStride_highp]
float b[SkRasterPipeline_kMaxStride_highp]
float scalex[SkRasterPipeline_kMaxStride_highp]
float fy[SkRasterPipeline_kMaxStride_highp]
float wy[4][SkRasterPipeline_kMaxStride_highp]
float scaley[SkRasterPipeline_kMaxStride_highp]
float fx[SkRasterPipeline_kMaxStride_highp]
float x[SkRasterPipeline_kMaxStride_highp]
float wx[4][SkRasterPipeline_kMaxStride_highp]
float y[SkRasterPipeline_kMaxStride_highp]