Flutter Engine
The Flutter Engine
Loading...
Searching...
No Matches
Classes | Namespaces | Macros | Typedefs | Enumerations | Variables
SkSLRasterPipelineBuilder.h File Reference
#include "include/core/SkTypes.h"
#include "include/core/SkSpan.h"
#include "include/private/base/SkTArray.h"
#include "src/base/SkUtils.h"
#include "src/core/SkRasterPipelineOpList.h"
#include <cstddef>
#include <cstdint>
#include <memory>

Go to the source code of this file.

Classes

struct  SkSL::RP::SlotRange
 
struct  SkSL::RP::Instruction
 
class  SkSL::RP::Callbacks
 
class  SkSL::RP::Program
 
class  SkSL::RP::Builder
 

Namespaces

namespace  SkSL
 
namespace  SkSL::RP
 

Macros

#define SKRP_EXTENDED_OPS(M)
 
#define M(stage)   stage,
 
#define M(stage)   stage,
 

Typedefs

using SkRPOffset = uint32_t
 
using SkSL::RP::Slot = int
 

Enumerations

enum class  SkSL::RP::ProgramOp { SkSL::RP::M }
 
enum class  SkSL::RP::BuilderOp {
  SkSL::RP::M , SkSL::RP::push_clone , SkSL::RP::push_clone_from_stack , SkSL::RP::push_clone_indirect_from_stack ,
  SkSL::RP::push_constant , SkSL::RP::push_immutable , SkSL::RP::push_immutable_indirect , SkSL::RP::push_slots ,
  SkSL::RP::push_slots_indirect , SkSL::RP::push_uniform , SkSL::RP::push_uniform_indirect , SkSL::RP::copy_stack_to_slots ,
  SkSL::RP::copy_stack_to_slots_unmasked , SkSL::RP::copy_stack_to_slots_indirect , SkSL::RP::copy_uniform_to_slots_unmasked , SkSL::RP::store_immutable_value ,
  SkSL::RP::swizzle_copy_stack_to_slots , SkSL::RP::swizzle_copy_stack_to_slots_indirect , SkSL::RP::discard_stack , SkSL::RP::pad_stack ,
  SkSL::RP::select , SkSL::RP::push_condition_mask , SkSL::RP::pop_condition_mask , SkSL::RP::push_loop_mask ,
  SkSL::RP::pop_loop_mask , SkSL::RP::pop_and_reenable_loop_mask , SkSL::RP::push_return_mask , SkSL::RP::pop_return_mask ,
  SkSL::RP::push_src_rgba , SkSL::RP::push_dst_rgba , SkSL::RP::push_device_xy01 , SkSL::RP::pop_src_rgba ,
  SkSL::RP::pop_dst_rgba , SkSL::RP::trace_var_indirect , SkSL::RP::branch_if_no_active_lanes_on_stack_top_equal , SkSL::RP::unsupported
}
 

Variables

constexpr Slot SkSL::RP::NA = -1
 

Macro Definition Documentation

◆ M [1/2]

#define M (   stage)    stage,

Definition at line 62 of file SkSLRasterPipelineBuilder.h.

◆ M [2/2]

#define M (   stage)    stage,

Definition at line 62 of file SkSLRasterPipelineBuilder.h.

◆ SKRP_EXTENDED_OPS

#define SKRP_EXTENDED_OPS (   M)
Value:
/* branch targets */ \
M(label) \
\
/* child programs */ \
M(invoke_shader) \
M(invoke_color_filter) \
M(invoke_blender) \
\
/* color space transforms */ \
M(invoke_to_linear_srgb) \
M(invoke_from_linear_srgb)

Definition at line 45 of file SkSLRasterPipelineBuilder.h.

60 {
61 #define M(stage) stage,
62 // A finished program can contain any native Raster Pipeline op...
64
65 // ... as well as our extended ops.
67 #undef M
68};
69
70// BuilderOps are a superset of ProgramOps. They are used by the RP::Builder, which works in terms
71// of Instructions; Instructions are slightly more expressive than raw SkRasterPipelineOps. In
72// particular, the Builder supports stacks for pushing and popping scratch values.
73// RP::Program::makeStages is responsible for rewriting Instructions/BuilderOps into an array of
74// RP::Program::Stages, which will contain only native SkRasterPipelineOps and (optionally)
75// child-effect invocations.
76enum class BuilderOp {
77 #define M(stage) stage,
78 // An in-flight program can contain all the native Raster Pipeline ops...
80
81 // ... and our extended ops...
83 #undef M
84
85 // ... and also has Builder-specific ops. These ops generally interface with the stack, and are
86 // converted into ProgramOps during `makeStages`.
105 pad_stack,
106 select,
122};
123
124// If the extended ops are not in sync between enums, program creation will not work.
125static_assert((int)ProgramOp::label == (int)BuilderOp::label);
126
127// Represents a single raster-pipeline SkSL instruction.
128struct Instruction {
129 BuilderOp fOp;
130 Slot fSlotA = NA;
131 Slot fSlotB = NA;
132 int fImmA = 0;
133 int fImmB = 0;
134 int fImmC = 0;
135 int fImmD = 0;
136 int fStackID = 0;
137};
138
139class Callbacks {
140public:
141 virtual ~Callbacks() = default;
142
143 virtual bool appendShader(int index) = 0;
144 virtual bool appendColorFilter(int index) = 0;
145 virtual bool appendBlender(int index) = 0;
146
147 virtual void toLinearSrgb(const void* color) = 0;
148 virtual void fromLinearSrgb(const void* color) = 0;
149};
150
151class Program {
152public:
154 int numValueSlots,
155 int numUniformSlots,
156 int numImmutableSlots,
157 int numLabels,
158 DebugTracePriv* debugTrace);
159 ~Program();
160
161 bool appendStages(SkRasterPipeline* pipeline,
162 SkArenaAlloc* alloc,
163 Callbacks* callbacks,
164 SkSpan<const float> uniforms) const;
165
166 void dump(SkWStream* out, bool writeInstructionCount = false) const;
167
168 int numUniforms() const { return fNumUniformSlots; }
169
170private:
171 using StackDepths = skia_private::TArray<int>; // [stack index] = depth of stack
172
173 struct SlotData {
175 SkSpan<float> stack;
176 SkSpan<float> immutable;
177 };
178 SlotData allocateSlotData(SkArenaAlloc* alloc) const;
179
180 struct Stage {
181 ProgramOp op;
182 void* ctx;
183 };
184 void makeStages(skia_private::TArray<Stage>* pipeline,
185 SkArenaAlloc* alloc,
186 SkSpan<const float> uniforms,
187 const SlotData& slots) const;
188 void optimize();
189 StackDepths tempStackMaxDepths() const;
190
191 // These methods are used to split up multi-slot copies into multiple ops as needed.
192 void appendCopy(skia_private::TArray<Stage>* pipeline,
193 SkArenaAlloc* alloc,
194 std::byte* basePtr,
195 ProgramOp baseStage,
196 SkRPOffset dst, int dstStride,
197 SkRPOffset src, int srcStride,
198 int numSlots) const;
199 void appendCopyImmutableUnmasked(skia_private::TArray<Stage>* pipeline,
200 SkArenaAlloc* alloc,
201 std::byte* basePtr,
202 SkRPOffset dst,
203 SkRPOffset src,
204 int numSlots) const;
205 void appendCopySlotsUnmasked(skia_private::TArray<Stage>* pipeline,
206 SkArenaAlloc* alloc,
207 SkRPOffset dst,
208 SkRPOffset src,
209 int numSlots) const;
210 void appendCopySlotsMasked(skia_private::TArray<Stage>* pipeline,
211 SkArenaAlloc* alloc,
212 SkRPOffset dst,
213 SkRPOffset src,
214 int numSlots) const;
215
216 // Appends a single-slot single-input math operation to the pipeline. The op `stage` will
217 // be appended `numSlots` times, starting at position `dst` and advancing one slot for each
218 // subsequent invocation.
219 void appendSingleSlotUnaryOp(skia_private::TArray<Stage>* pipeline, ProgramOp stage,
220 float* dst, int numSlots) const;
221
222 // Appends a multi-slot single-input math operation to the pipeline. `baseStage` must refer to
223 // a single-slot "apply_op" stage, which must be immediately followed by specializations for
224 // 2-4 slots. For instance, {`ceil_float`, `ceil_2_floats`, `ceil_3_floats`, `ceil_4_floats`}
225 // must be contiguous ops in the stage list, listed in that order; pass `ceil_float` and we
226 // pick the appropriate op based on `numSlots`.
227 void appendMultiSlotUnaryOp(skia_private::TArray<Stage>* pipeline, ProgramOp baseStage,
228 float* dst, int numSlots) const;
229
230 // Appends an immediate-mode binary operation to the pipeline. `baseStage` must refer to
231 // a single-slot, immediate-mode "apply-imm" stage, which must be immediately preceded by
232 // specializations for 2-4 slots if numSlots is greater than 1. For instance, {`add_imm_4_ints`,
233 // `add_imm_3_ints`, `add_imm_2_ints`, `add_imm_int`} must be contiguous ops in the stage list,
234 // listed in that order; pass `add_imm_int` and we pick the appropriate op based on `numSlots`.
235 // Some immediate-mode binary ops are single-slot only in the interest of code size; in this
236 // case, the multi-slot ops can be absent, but numSlots must be 1.
237 void appendImmediateBinaryOp(skia_private::TArray<Stage>* pipeline, SkArenaAlloc* alloc,
238 ProgramOp baseStage,
239 SkRPOffset dst, int32_t value, int numSlots) const;
240
241 // Appends a two-input math operation to the pipeline. `src` must be _immediately_ after `dst`
242 // in memory. `baseStage` must refer to an unbounded "apply_to_n_slots" stage. A BinaryOpCtx
243 // will be used to pass pointers to the destination and source; the delta between the two
244 // pointers implicitly gives the number of slots.
245 void appendAdjacentNWayBinaryOp(skia_private::TArray<Stage>* pipeline, SkArenaAlloc* alloc,
246 ProgramOp stage,
247 SkRPOffset dst, SkRPOffset src, int numSlots) const;
248
249 // Appends a multi-slot two-input math operation to the pipeline. `src` must be _immediately_
250 // after `dst` in memory. `baseStage` must refer to an unbounded "apply_to_n_slots" stage, which
251 // must be immediately followed by specializations for 1-4 slots. For instance, {`add_n_floats`,
252 // `add_float`, `add_2_floats`, `add_3_floats`, `add_4_floats`} must be contiguous ops in the
253 // stage list, listed in that order; pass `add_n_floats` and we pick the appropriate op based on
254 // `numSlots`.
255 void appendAdjacentMultiSlotBinaryOp(skia_private::TArray<Stage>* pipeline, SkArenaAlloc* alloc,
256 ProgramOp baseStage, std::byte* basePtr,
257 SkRPOffset dst, SkRPOffset src, int numSlots) const;
258
259 // Appends a multi-slot math operation having three inputs (dst, src0, src1) and one output
260 // (dst) to the pipeline. The three inputs must be _immediately_ adjacent in memory. `baseStage`
261 // must refer to an unbounded "apply_to_n_slots" stage, which must be immediately followed by
262 // specializations for 1-4 slots.
263 void appendAdjacentMultiSlotTernaryOp(skia_private::TArray<Stage>* pipeline,
264 SkArenaAlloc* alloc, ProgramOp baseStage,
265 std::byte* basePtr, SkRPOffset dst, SkRPOffset src0,
266 SkRPOffset src1, int numSlots) const;
267
268 // Appends a math operation having three inputs (dst, src0, src1) and one output (dst) to the
269 // pipeline. The three inputs must be _immediately_ adjacent in memory. `baseStage` must refer
270 // to an unbounded "apply_to_n_slots" stage. A TernaryOpCtx will be used to pass pointers to the
271 // destination and sources; the delta between each pointer implicitly gives the slot count.
272 void appendAdjacentNWayTernaryOp(skia_private::TArray<Stage>* pipeline, SkArenaAlloc* alloc,
273 ProgramOp stage, std::byte* basePtr, SkRPOffset dst,
274 SkRPOffset src0, SkRPOffset src1, int numSlots) const;
275
276 // Appends a stack_rewind op on platforms where it is needed (when SK_HAS_MUSTTAIL is not set).
277 void appendStackRewind(skia_private::TArray<Stage>* pipeline) const;
278
279 class Dumper;
280 friend class Dumper;
281
283 int fNumValueSlots = 0;
284 int fNumUniformSlots = 0;
285 int fNumImmutableSlots = 0;
286 int fNumTempStackSlots = 0;
287 int fNumLabels = 0;
288 StackDepths fTempStackMaxDepths;
289 DebugTracePriv* fDebugTrace = nullptr;
290 std::unique_ptr<SkSL::TraceHook> fTraceHook;
291};
292
293class Builder {
294public:
295 /** Finalizes and optimizes the program. */
296 std::unique_ptr<Program> finish(int numValueSlots,
297 int numUniformSlots,
298 int numImmutableSlots,
299 DebugTracePriv* debugTrace = nullptr);
300 /**
301 * Peels off a label ID for use in the program. Set the label's position in the program with
302 * the `label` instruction. Actually branch to the target with an instruction like
303 * `branch_if_any_lanes_active` or `jump`.
304 */
305 int nextLabelID() {
306 return fNumLabels++;
307 }
308
309 /**
310 * The builder keeps track of the state of execution masks; when we know that the execution
311 * mask is unaltered, we can generate simpler code. Code which alters the execution mask is
312 * required to enable this flag.
313 */
314 void enableExecutionMaskWrites() {
315 ++fExecutionMaskWritesEnabled;
316 }
317
318 void disableExecutionMaskWrites() {
319 SkASSERT(this->executionMaskWritesAreEnabled());
320 --fExecutionMaskWritesEnabled;
321 }
322
323 bool executionMaskWritesAreEnabled() {
324 return fExecutionMaskWritesEnabled > 0;
325 }
326
327 /** Assemble a program from the Raster Pipeline instructions below. */
328 void init_lane_masks() {
329 this->appendInstruction(BuilderOp::init_lane_masks, {});
330 }
331
332 void store_src_rg(SlotRange slots) {
333 SkASSERT(slots.count == 2);
334 this->appendInstruction(BuilderOp::store_src_rg, {slots.index});
335 }
336
337 void store_src(SlotRange slots) {
338 SkASSERT(slots.count == 4);
339 this->appendInstruction(BuilderOp::store_src, {slots.index});
340 }
341
342 void store_dst(SlotRange slots) {
343 SkASSERT(slots.count == 4);
344 this->appendInstruction(BuilderOp::store_dst, {slots.index});
345 }
346
347 void store_device_xy01(SlotRange slots) {
348 SkASSERT(slots.count == 4);
349 this->appendInstruction(BuilderOp::store_device_xy01, {slots.index});
350 }
351
352 void load_src(SlotRange slots) {
353 SkASSERT(slots.count == 4);
354 this->appendInstruction(BuilderOp::load_src, {slots.index});
355 }
356
357 void load_dst(SlotRange slots) {
358 SkASSERT(slots.count == 4);
359 this->appendInstruction(BuilderOp::load_dst, {slots.index});
360 }
361
362 void set_current_stack(int stackID) {
363 fCurrentStackID = stackID;
364 }
365
366 // Inserts a label into the instruction stream.
367 void label(int labelID);
368
369 // Unconditionally branches to a label.
370 void jump(int labelID);
371
372 // Branches to a label if the execution mask is active in every lane.
373 void branch_if_all_lanes_active(int labelID);
374
375 // Branches to a label if the execution mask is active in any lane.
376 void branch_if_any_lanes_active(int labelID);
377
378 // Branches to a label if the execution mask is inactive across all lanes.
379 void branch_if_no_lanes_active(int labelID);
380
381 // Branches to a label if the top value on the stack is _not_ equal to `value` in any lane.
382 void branch_if_no_active_lanes_on_stack_top_equal(int value, int labelID);
383
384 // We use the same SkRasterPipeline op regardless of the literal type, and bitcast the value.
385 void push_constant_i(int32_t val, int count = 1);
386
387 void push_zeros(int count) {
388 this->push_constant_i(/*val=*/0, count);
389 }
390
391 void push_constant_f(float val) {
392 this->push_constant_i(sk_bit_cast<int32_t>(val), /*count=*/1);
393 }
394
395 void push_constant_u(uint32_t val, int count = 1) {
396 this->push_constant_i(sk_bit_cast<int32_t>(val), count);
397 }
398
399 // Translates into copy_uniforms (from uniforms into temp stack) in Raster Pipeline.
400 void push_uniform(SlotRange src);
401
402 // Initializes the Raster Pipeline slot with a constant value when the program is first created.
403 // Does not add any instructions to the program.
404 void store_immutable_value_i(Slot slot, int32_t val) {
405 this->appendInstruction(BuilderOp::store_immutable_value, {slot}, val);
406 }
407
408 // Translates into copy_uniforms (from uniforms into value-slots) in Raster Pipeline.
409 void copy_uniform_to_slots_unmasked(SlotRange dst, SlotRange src);
410
411 // Translates into copy_from_indirect_uniform_unmasked (from values into temp stack) in Raster
412 // Pipeline. `fixedRange` denotes a fixed set of slots; this range is pushed forward by the
413 // value at the top of stack `dynamicStack`. Pass the range of the uniform being indexed as
414 // `limitRange`; this is used as a hard cap, to avoid indexing outside of bounds.
415 void push_uniform_indirect(SlotRange fixedRange, int dynamicStack, SlotRange limitRange);
416
417
418 // Translates into copy_slots_unmasked (from values into temp stack) in Raster Pipeline.
419 void push_slots(SlotRange src) {
420 this->push_slots_or_immutable(src, BuilderOp::push_slots);
421 }
422
423 // Translates into copy_immutable_unmasked (from immutables into temp stack) in Raster Pipeline.
424 void push_immutable(SlotRange src) {
425 this->push_slots_or_immutable(src, BuilderOp::push_immutable);
426 }
427
428 void push_slots_or_immutable(SlotRange src, BuilderOp op);
429
430 // Translates into copy_from_indirect_unmasked (from values into temp stack) in Raster Pipeline.
431 // `fixedRange` denotes a fixed set of slots; this range is pushed forward by the value at the
432 // top of stack `dynamicStack`. Pass the slot range of the variable being indexed as
433 // `limitRange`; this is used as a hard cap, to avoid indexing outside of bounds.
434 void push_slots_indirect(SlotRange fixedRange, int dynamicStack, SlotRange limitRange) {
435 this->push_slots_or_immutable_indirect(fixedRange, dynamicStack, limitRange,
436 BuilderOp::push_slots_indirect);
437 }
438
439 void push_immutable_indirect(SlotRange fixedRange, int dynamicStack, SlotRange limitRange) {
440 this->push_slots_or_immutable_indirect(fixedRange, dynamicStack, limitRange,
441 BuilderOp::push_immutable_indirect);
442 }
443
444 void push_slots_or_immutable_indirect(SlotRange fixedRange, int dynamicStack,
445 SlotRange limitRange, BuilderOp op);
446
447 // Translates into copy_slots_masked (from temp stack to values) in Raster Pipeline.
448 // Does not discard any values on the temp stack.
449 void copy_stack_to_slots(SlotRange dst) {
450 this->copy_stack_to_slots(dst, /*offsetFromStackTop=*/dst.count);
451 }
452
453 void copy_stack_to_slots(SlotRange dst, int offsetFromStackTop);
454
455 // Translates into swizzle_copy_slots_masked (from temp stack to values) in Raster Pipeline.
456 // Does not discard any values on the temp stack.
457 void swizzle_copy_stack_to_slots(SlotRange dst,
458 SkSpan<const int8_t> components,
459 int offsetFromStackTop);
460
461 // Translates into swizzle_copy_to_indirect_masked (from temp stack to values) in Raster
462 // Pipeline. Does not discard any values on the temp stack.
463 void swizzle_copy_stack_to_slots_indirect(SlotRange fixedRange,
464 int dynamicStackID,
465 SlotRange limitRange,
466 SkSpan<const int8_t> components,
467 int offsetFromStackTop);
468
469 // Translates into copy_slots_unmasked (from temp stack to values) in Raster Pipeline.
470 // Does not discard any values on the temp stack.
471 void copy_stack_to_slots_unmasked(SlotRange dst) {
472 this->copy_stack_to_slots_unmasked(dst, /*offsetFromStackTop=*/dst.count);
473 }
474
475 void copy_stack_to_slots_unmasked(SlotRange dst, int offsetFromStackTop);
476
477 // Translates into copy_to_indirect_masked (from temp stack into values) in Raster Pipeline.
478 // `fixedRange` denotes a fixed set of slots; this range is pushed forward by the value at the
479 // top of stack `dynamicStack`. Pass the slot range of the variable being indexed as
480 // `limitRange`; this is used as a hard cap, to avoid indexing outside of bounds.
481 void copy_stack_to_slots_indirect(SlotRange fixedRange,
482 int dynamicStackID,
483 SlotRange limitRange);
484
485 // Copies from temp stack to slots, including an indirect offset, then shrinks the temp stack.
486 void pop_slots_indirect(SlotRange fixedRange, int dynamicStackID, SlotRange limitRange) {
487 this->copy_stack_to_slots_indirect(fixedRange, dynamicStackID, limitRange);
488 this->discard_stack(fixedRange.count);
489 }
490
491 // Performs a unary op (like `bitwise_not`), given a slot count of `slots`. The stack top is
492 // replaced with the result.
493 void unary_op(BuilderOp op, int32_t slots);
494
495 // Performs a binary op (like `add_n_floats` or `cmpeq_n_ints`), given a slot count of
496 // `slots`. Two n-slot input values are consumed, and the result is pushed onto the stack.
497 void binary_op(BuilderOp op, int32_t slots);
498
499 // Performs a ternary op (like `mix` or `smoothstep`), given a slot count of
500 // `slots`. Three n-slot input values are consumed, and the result is pushed onto the stack.
501 void ternary_op(BuilderOp op, int32_t slots);
502
503 // Computes a dot product on the stack. The slots consumed (`slots`) must be between 1 and 4.
504 // Two n-slot input vectors are consumed, and a scalar result is pushed onto the stack.
505 void dot_floats(int32_t slots);
506
507 // Computes refract(N, I, eta) on the stack. N and I are assumed to be 4-slot vectors, and can
508 // be padded with zeros for smaller inputs. Eta is a scalar. The result is a 4-slot vector.
509 void refract_floats();
510
511 // Computes inverse(matN) on the stack. Pass 2, 3 or 4 for n to specify matrix size.
512 void inverse_matrix(int32_t n);
513
514 // Shrinks the temp stack, discarding values on top.
515 void discard_stack(int32_t count, int stackID);
516
517 void discard_stack(int32_t count) {
518 this->discard_stack(count, fCurrentStackID);
519 }
520
521 // Grows the temp stack, leaving any preexisting values in place.
522 void pad_stack(int32_t count);
523
524 // Copies values from the temp stack into slots, and then shrinks the temp stack.
525 void pop_slots(SlotRange dst);
526
527 // Creates many clones of the top single-slot item on the temp stack.
528 void push_duplicates(int count);
529
530 // Creates a single clone of an item on the current temp stack. The cloned item can consist of
531 // any number of slots, and can be copied from an earlier position on the stack.
532 void push_clone(int numSlots, int offsetFromStackTop = 0);
533
534 // Clones a range of slots from another stack onto this stack.
535 void push_clone_from_stack(SlotRange range, int otherStackID, int offsetFromStackTop);
536
537 // Translates into copy_from_indirect_unmasked (from one temp stack to another) in Raster
538 // Pipeline. `fixedOffset` denotes a range of slots within the top `offsetFromStackTop` slots of
539 // `otherStackID`. This range is pushed forward by the value at the top of `dynamicStackID`.
540 void push_clone_indirect_from_stack(SlotRange fixedOffset,
541 int dynamicStackID,
542 int otherStackID,
543 int offsetFromStackTop);
544
545 // Compares the stack top with the passed-in value; if it matches, enables the loop mask.
546 void case_op(int value) {
547 this->appendInstruction(BuilderOp::case_op, {}, value);
548 }
549
550 // Performs a `continue` in a loop.
551 void continue_op(int continueMaskStackID) {
552 this->appendInstruction(BuilderOp::continue_op, {}, continueMaskStackID);
553 }
554
555 void select(int slots) {
556 // Overlays the top two entries on the stack, making one hybrid entry. The execution mask
557 // is used to select which lanes are preserved.
558 SkASSERT(slots > 0);
559 this->appendInstruction(BuilderOp::select, {}, slots);
560 }
561
562 // The opposite of push_slots; copies values from the temp stack into value slots, then
563 // shrinks the temp stack.
564 void pop_slots_unmasked(SlotRange dst);
565
566 void copy_slots_masked(SlotRange dst, SlotRange src) {
567 SkASSERT(dst.count == src.count);
568 this->appendInstruction(BuilderOp::copy_slot_masked, {dst.index, src.index}, dst.count);
569 }
570
571 void copy_slots_unmasked(SlotRange dst, SlotRange src);
572
573 void copy_immutable_unmasked(SlotRange dst, SlotRange src);
574
575 // Directly writes a constant value into a slot.
576 void copy_constant(Slot slot, int constantValue);
577
578 // Stores zeros across the entire slot range.
579 void zero_slots_unmasked(SlotRange dst);
580
581 // Consumes `consumedSlots` elements on the stack, then generates `components.size()` elements.
582 void swizzle(int consumedSlots, SkSpan<const int8_t> components);
583
584 // Transposes a matrix of size CxR on the stack (into a matrix of size RxC).
585 void transpose(int columns, int rows);
586
587 // Generates a CxR diagonal matrix from the top two scalars on the stack. The second scalar is
588 // used as the diagonal value; the first scalar (usually zero) fills in the rest of the slots.
589 void diagonal_matrix(int columns, int rows);
590
591 // Resizes a CxR matrix at the top of the stack to C'xR'.
592 void matrix_resize(int origColumns, int origRows, int newColumns, int newRows);
593
594 // Multiplies a CxR matrix/vector against an adjacent CxR matrix/vector on the stack.
595 void matrix_multiply(int leftColumns, int leftRows, int rightColumns, int rightRows);
596
597 void push_condition_mask();
598
599 void pop_condition_mask() {
600 SkASSERT(this->executionMaskWritesAreEnabled());
601 this->appendInstruction(BuilderOp::pop_condition_mask, {});
602 }
603
604 void merge_condition_mask();
605
606 void merge_inv_condition_mask() {
607 SkASSERT(this->executionMaskWritesAreEnabled());
608 this->appendInstruction(BuilderOp::merge_inv_condition_mask, {});
609 }
610
611 void push_loop_mask() {
612 SkASSERT(this->executionMaskWritesAreEnabled());
613 this->appendInstruction(BuilderOp::push_loop_mask, {});
614 }
615
616 void pop_loop_mask() {
617 SkASSERT(this->executionMaskWritesAreEnabled());
618 this->appendInstruction(BuilderOp::pop_loop_mask, {});
619 }
620
621 // Exchanges src.rgba with the four values at the top of the stack.
622 void exchange_src();
623
624 void push_src_rgba() {
625 this->appendInstruction(BuilderOp::push_src_rgba, {});
626 }
627
628 void push_dst_rgba() {
629 this->appendInstruction(BuilderOp::push_dst_rgba, {});
630 }
631
632 void push_device_xy01() {
633 this->appendInstruction(BuilderOp::push_device_xy01, {});
634 }
635
636 void pop_src_rgba();
637
638 void pop_dst_rgba() {
639 this->appendInstruction(BuilderOp::pop_dst_rgba, {});
640 }
641
642 void mask_off_loop_mask() {
643 SkASSERT(this->executionMaskWritesAreEnabled());
644 this->appendInstruction(BuilderOp::mask_off_loop_mask, {});
645 }
646
647 void reenable_loop_mask(SlotRange src) {
648 SkASSERT(this->executionMaskWritesAreEnabled());
649 SkASSERT(src.count == 1);
650 this->appendInstruction(BuilderOp::reenable_loop_mask, {src.index});
651 }
652
654 SkASSERT(this->executionMaskWritesAreEnabled());
655 this->appendInstruction(BuilderOp::pop_and_reenable_loop_mask, {});
656 }
657
658 void merge_loop_mask() {
659 SkASSERT(this->executionMaskWritesAreEnabled());
660 this->appendInstruction(BuilderOp::merge_loop_mask, {});
661 }
662
663 void push_return_mask() {
664 SkASSERT(this->executionMaskWritesAreEnabled());
665 this->appendInstruction(BuilderOp::push_return_mask, {});
666 }
667
668 void pop_return_mask();
669
670 void mask_off_return_mask() {
671 SkASSERT(this->executionMaskWritesAreEnabled());
672 this->appendInstruction(BuilderOp::mask_off_return_mask, {});
673 }
674
675 void invoke_shader(int childIdx) {
676 this->appendInstruction(BuilderOp::invoke_shader, {}, childIdx);
677 }
678
679 void invoke_color_filter(int childIdx) {
680 this->appendInstruction(BuilderOp::invoke_color_filter, {}, childIdx);
681 }
682
683 void invoke_blender(int childIdx) {
684 this->appendInstruction(BuilderOp::invoke_blender, {}, childIdx);
685 }
686
687 void invoke_to_linear_srgb() {
688 // The intrinsics accept a three-component value; add a fourth padding element (which
689 // will be ignored) since our RP ops deal in RGBA colors.
690 this->pad_stack(1);
691 this->appendInstruction(BuilderOp::invoke_to_linear_srgb, {});
692 this->discard_stack(1);
693 }
694
695 void invoke_from_linear_srgb() {
696 // The intrinsics accept a three-component value; add a fourth padding element (which
697 // will be ignored) since our RP ops deal in RGBA colors.
698 this->pad_stack(1);
699 this->appendInstruction(BuilderOp::invoke_from_linear_srgb, {});
700 this->discard_stack(1);
701 }
702
703 // Writes the current line number to the debug trace.
704 void trace_line(int traceMaskStackID, int line) {
705 this->appendInstruction(BuilderOp::trace_line, {}, traceMaskStackID, line);
706 }
707
708 // Writes a variable update to the debug trace.
709 void trace_var(int traceMaskStackID, SlotRange r) {
710 this->appendInstruction(BuilderOp::trace_var, {r.index}, traceMaskStackID, r.count);
711 }
712
713 // Writes a variable update (via indirection) to the debug trace.
714 void trace_var_indirect(int traceMaskStackID, SlotRange fixedRange,
715 int dynamicStackID, SlotRange limitRange);
716
717 // Writes a function-entrance to the debug trace.
718 void trace_enter(int traceMaskStackID, int funcID) {
719 this->appendInstruction(BuilderOp::trace_enter, {}, traceMaskStackID, funcID);
720 }
721
722 // Writes a function-exit to the debug trace.
723 void trace_exit(int traceMaskStackID, int funcID) {
724 this->appendInstruction(BuilderOp::trace_exit, {}, traceMaskStackID, funcID);
725 }
726
727 // Writes a scope-level change to the debug trace.
728 void trace_scope(int traceMaskStackID, int delta) {
729 this->appendInstruction(BuilderOp::trace_scope, {}, traceMaskStackID, delta);
730 }
731
732private:
733 struct SlotList {
734 SlotList(Slot a = NA, Slot b = NA) : fSlotA(a), fSlotB(b) {}
735 Slot fSlotA = NA;
736 Slot fSlotB = NA;
737 };
738 void appendInstruction(BuilderOp op, SlotList slots,
739 int a = 0, int b = 0, int c = 0, int d = 0);
740 Instruction* lastInstruction(int fromBack = 0);
741 Instruction* lastInstructionOnAnyStack(int fromBack = 0);
742 void simplifyPopSlotsUnmasked(SlotRange* dst);
743 bool simplifyImmediateUnmaskedOp();
744
746 int fNumLabels = 0;
747 int fExecutionMaskWritesEnabled = 0;
748 int fCurrentStackID = 0;
749};
750
751} // namespace RP
752} // namespace SkSL
753
754#endif // SKSL_RASTERPIPELINEBUILDER
int count
SkColor4f color
#define SkASSERT(cond)
Definition SkAssert.h:116
static SkTileMode optimize(SkTileMode tm, int dimension)
uint32_t SkRPOffset
#define SK_RASTER_PIPELINE_OPS_ALL(M)
#define M(stage)
#define SKRP_EXTENDED_OPS(M)
static void dump(const float m[20], SkYUVColorSpace cs, bool rgb2yuv)
Type::kYUV Type::kRGBA() int(0.7 *637)
VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE auto & d
Definition main.cc:19
static bool b
struct MyStruct a[10]
uint8_t value
SI void matrix_multiply(SkRasterPipeline_MatrixMultiplyCtx *packed, std::byte *base)
constexpr Slot NA
@ swizzle_copy_stack_to_slots_indirect
@ branch_if_no_active_lanes_on_stack_top_equal
DlVertices::Builder Builder
dst
Definition cp.py:12

Typedef Documentation

◆ SkRPOffset

using SkRPOffset = uint32_t

Definition at line 26 of file SkSLRasterPipelineBuilder.h.