Flutter Engine
The Flutter Engine
Loading...
Searching...
No Matches
Namespaces | Classes | Typedefs | Functions | Variables
SK_OPTS_NS Namespace Reference

Namespaces

namespace  lowp
 

Classes

struct  Params
 
struct  RGB
 

Typedefs

using F = float
 
using I32 = int32_t
 
using U64 = uint64_t
 
using U32 = uint32_t
 
using U16 = uint16_t
 
using U8 = uint8_t
 
using Stage = void(ABI *)(Params *, SkRasterPipelineStage *program, F r, F g, F b, F a)
 

Functions

template<typename U32 , typename Out >
static void decode_packed_coordinates_and_weight (U32 packed, Out *v0, Out *v1, Out *w)
 
void S32_alpha_D32_filter_DX (const SkBitmapProcState &s, const uint32_t *xy, int count, uint32_t *colors)
 
static __m128i SkAlphaMul_lsx (__m128i x, __m128i y)
 
template<bool isColor>
static void D32_A8_Opaque_Color_lsx (void *SK_RESTRICT dst, size_t dstRB, const void *SK_RESTRICT maskPtr, size_t maskRB, SkColor color, int width, int height)
 
static void blit_mask_d32_a8_general (SkPMColor *dst, size_t dstRB, const SkAlpha *mask, size_t maskRB, SkColor color, int w, int h)
 
static void blit_mask_d32_a8_opaque (SkPMColor *dst, size_t dstRB, const SkAlpha *mask, size_t maskRB, SkColor color, int w, int h)
 
static void blit_mask_d32_a8_black (SkPMColor *dst, size_t dstRB, const SkAlpha *maskPtr, size_t maskRB, int width, int height)
 
void blit_mask_d32_a8 (SkPMColor *dst, size_t dstRB, const SkAlpha *mask, size_t maskRB, SkColor color, int w, int h)
 
void blit_row_s32a_opaque (SkPMColor *dst, const SkPMColor *src, int len, U8CPU alpha)
 
void blit_row_color32 (SkPMColor *dst, int count, SkPMColor color)
 
template<typename T >
static void memsetT (T buffer[], T value, int count)
 
void memset16 (uint16_t buffer[], uint16_t value, int count)
 
void memset32 (uint32_t buffer[], uint32_t value, int count)
 
void memset64 (uint64_t buffer[], uint64_t value, int count)
 
template<typename T >
static void rect_memsetT (T buffer[], T value, int count, size_t rowBytes, int height)
 
void rect_memset16 (uint16_t buffer[], uint16_t value, int count, size_t rowBytes, int height)
 
void rect_memset32 (uint32_t buffer[], uint32_t value, int count, size_t rowBytes, int height)
 
void rect_memset64 (uint64_t buffer[], uint64_t value, int count, size_t rowBytes, int height)
 
SI F min (F a, F b)
 
SI I32 min (I32 a, I32 b)
 
SI U32 min (U32 a, U32 b)
 
SI F max (F a, F b)
 
SI I32 max (I32 a, I32 b)
 
SI U32 max (U32 a, U32 b)
 
SI F mad (F f, F m, F a)
 
SI F nmad (F f, F m, F a)
 
SI F abs_ (F v)
 
SI I32 abs_ (I32 v)
 
SI F floor_ (F v)
 
SI F ceil_ (F v)
 
SI F rcp_approx (F v)
 
SI F rsqrt_approx (F v)
 
SI F sqrt_ (F v)
 
SI F rcp_precise (F v)
 
SI I32 iround (F v)
 
SI U32 round (F v)
 
SI U32 round (F v, F scale)
 
SI U16 pack (U32 v)
 
SI U8 pack (U16 v)
 
SI F if_then_else (I32 c, F t, F e)
 
SI I32 if_then_else (I32 c, I32 t, I32 e)
 
SI bool any (I32 c)
 
SI bool all (I32 c)
 
template<typename T >
SI T gather (const T *p, U32 ix)
 
SI void scatter_masked (I32 src, int *dst, U32 ix, I32 mask)
 
SI void load2 (const uint16_t *ptr, U16 *r, U16 *g)
 
SI void store2 (uint16_t *ptr, U16 r, U16 g)
 
SI void load4 (const uint16_t *ptr, U16 *r, U16 *g, U16 *b, U16 *a)
 
SI void store4 (uint16_t *ptr, U16 r, U16 g, U16 b, U16 a)
 
SI void load4 (const float *ptr, F *r, F *g, F *b, F *a)
 
SI void store4 (float *ptr, F r, F g, F b, F a)
 
SI constexpr F F_ (float x)
 
SI constexpr I32 I32_ (int32_t x)
 
SI constexpr U32 U32_ (uint32_t x)
 
SI F cast (U32 v)
 
SI F cast64 (U64 v)
 
SI U32 trunc_ (F v)
 
SI U32 expand (U16 v)
 
SI U32 expand (U8 v)
 
SI F fract (F v)
 
SI F approx_log2 (F x)
 
SI F approx_log (F x)
 
SI F approx_pow2 (F x)
 
SI F approx_exp (F x)
 
SI F approx_powf (F x, F y)
 
SI F from_half (U16 h)
 
SI U16 to_half (F f)
 
static void patch_memory_contexts (SkSpan< SkRasterPipeline_MemoryCtxPatch > memoryCtxPatches, size_t dx, size_t dy, size_t tail)
 
static void restore_memory_contexts (SkSpan< SkRasterPipeline_MemoryCtxPatch > memoryCtxPatches, size_t dx, size_t dy, size_t tail)
 
SI F rcp_fast (F v)
 
SI F rsqrt (F v)
 
static void start_pipeline (size_t dx, size_t dy, size_t xlimit, size_t ylimit, SkRasterPipelineStage *program, SkSpan< SkRasterPipeline_MemoryCtxPatch > memoryCtxPatches, uint8_t *tailPointer)
 
static void ABI just_return (Params *, SkRasterPipelineStage *, F, F, F, F)
 
static void ABI stack_checkpoint (Params *params, SkRasterPipelineStage *program, F r, F g, F b, F a)
 
static void ABI stack_rewind (Params *params, SkRasterPipelineStage *program, F r, F g, F b, F a)
 
template<typename V , typename T >
SI V load (const T *src)
 
template<typename V , typename T >
SI void store (T *dst, V v)
 
SI F from_byte (U8 b)
 
SI F from_short (U16 s)
 
SI void from_565 (U16 _565, F *r, F *g, F *b)
 
SI void from_4444 (U16 _4444, F *r, F *g, F *b, F *a)
 
SI void from_8888 (U32 _8888, F *r, F *g, F *b, F *a)
 
SI void from_88 (U16 _88, F *r, F *g)
 
SI void from_1010102 (U32 rgba, F *r, F *g, F *b, F *a)
 
SI void from_1010102_xr (U32 rgba, F *r, F *g, F *b, F *a)
 
SI void from_10101010_xr (U64 _10x6, F *r, F *g, F *b, F *a)
 
SI void from_10x6 (U64 _10x6, F *r, F *g, F *b, F *a)
 
SI void from_1616 (U32 _1616, F *r, F *g)
 
SI void from_16161616 (U64 _16161616, F *r, F *g, F *b, F *a)
 
template<typename T >
SI T * ptr_at_xy (const SkRasterPipeline_MemoryCtx *ctx, size_t dx, size_t dy)
 
SI F clamp (F v, F limit)
 
SI F clamp_ex (F v, float limit)
 
SI F sin5q_ (F x)
 
SI F sin_ (F x)
 
SI F cos_ (F x)
 
SI F tan_ (F x)
 
SI F approx_atan_unit (F x)
 
SI F atan_ (F x)
 
SI F asin_ (F x)
 
SI F acos_ (F x)
 
SI F atan2_ (F y0, F x0)
 
template<typename T >
SI U32 ix_and_ptr (T **ptr, const SkRasterPipeline_GatherCtx *ctx, F x, F y)
 
SI U32 to_unorm (F v, float scale, float bias=1.0f)
 
SI I32 cond_to_mask (I32 cond)
 
SI uint32_t select_lane (uint32_t data, int)
 
SI int32_t select_lane (int32_t data, int)
 
 STAGE (seed_shader, NoCtx)
 
 STAGE (dither, const float *rate)
 
 STAGE (uniform_color, const SkRasterPipeline_UniformColorCtx *c)
 
 STAGE (unbounded_uniform_color, const SkRasterPipeline_UniformColorCtx *c)
 
 STAGE (uniform_color_dst, const SkRasterPipeline_UniformColorCtx *c)
 
 STAGE (black_color, NoCtx)
 
 STAGE (white_color, NoCtx)
 
 STAGE (load_src, const float *ptr)
 
 STAGE (store_src, float *ptr)
 
 STAGE (store_src_rg, float *ptr)
 
 STAGE (load_src_rg, float *ptr)
 
 STAGE (store_src_a, float *ptr)
 
 STAGE (load_dst, const float *ptr)
 
 STAGE (store_dst, float *ptr)
 
SI F inv (F x)
 
SI F two (F x)
 
 BLEND_MODE (clear)
 
 BLEND_MODE (srcatop)
 
 BLEND_MODE (dstatop)
 
 BLEND_MODE (srcin)
 
 BLEND_MODE (dstin)
 
 BLEND_MODE (srcout)
 
 BLEND_MODE (dstout)
 
 BLEND_MODE (srcover)
 
 BLEND_MODE (dstover)
 
 BLEND_MODE (modulate)
 
 BLEND_MODE (multiply)
 
 BLEND_MODE (plus_)
 
 BLEND_MODE (screen)
 
 BLEND_MODE (xor_)
 
 BLEND_MODE (darken)
 
 BLEND_MODE (lighten)
 
 BLEND_MODE (difference)
 
 BLEND_MODE (exclusion)
 
 BLEND_MODE (colorburn)
 
 BLEND_MODE (colordodge)
 
 BLEND_MODE (hardlight)
 
 BLEND_MODE (overlay)
 
 BLEND_MODE (softlight)
 
SI F sat (F r, F g, F b)
 
SI F lum (F r, F g, F b)
 
SI void set_sat (F *r, F *g, F *b, F s)
 
SI void set_lum (F *r, F *g, F *b, F l)
 
SI F clip_channel (F c, F l, I32 clip_low, I32 clip_high, F mn_scale, F mx_scale)
 
SI void clip_color (F *r, F *g, F *b, F a)
 
 STAGE (hue, NoCtx)
 
 STAGE (saturation, NoCtx)
 
 STAGE (color, NoCtx)
 
 STAGE (luminosity, NoCtx)
 
 STAGE (srcover_rgba_8888, const SkRasterPipeline_MemoryCtx *ctx)
 
SI F clamp_01_ (F v)
 
 STAGE (clamp_01, NoCtx)
 
 STAGE (clamp_gamut, NoCtx)
 
 STAGE (set_rgb, const float *rgb)
 
 STAGE (unbounded_set_rgb, const float *rgb)
 
 STAGE (swap_rb, NoCtx)
 
 STAGE (swap_rb_dst, NoCtx)
 
 STAGE (move_src_dst, NoCtx)
 
 STAGE (move_dst_src, NoCtx)
 
 STAGE (swap_src_dst, NoCtx)
 
 STAGE (premul, NoCtx)
 
 STAGE (premul_dst, NoCtx)
 
 STAGE (unpremul, NoCtx)
 
 STAGE (unpremul_polar, NoCtx)
 
 STAGE (force_opaque, NoCtx)
 
 STAGE (force_opaque_dst, NoCtx)
 
 STAGE (rgb_to_hsl, NoCtx)
 
 STAGE (hsl_to_rgb, NoCtx)
 
 STAGE (css_lab_to_xyz, NoCtx)
 
 STAGE (css_oklab_to_linear_srgb, NoCtx)
 
 STAGE (css_oklab_gamut_map_to_linear_srgb, NoCtx)
 
 STAGE (css_hcl_to_lab, NoCtx)
 
SI F mod_ (F x, float y)
 
SI RGB css_hsl_to_srgb_ (F h, F s, F l)
 
 STAGE (css_hsl_to_srgb, NoCtx)
 
 STAGE (css_hwb_to_srgb, NoCtx)
 
SI F alpha_coverage_from_rgb_coverage (F a, F da, F cr, F cg, F cb)
 
 STAGE (scale_1_float, const float *c)
 
 STAGE (scale_u8, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (scale_565, const SkRasterPipeline_MemoryCtx *ctx)
 
SI F lerp (F from, F to, F t)
 
 STAGE (lerp_1_float, const float *c)
 
 STAGE (scale_native, const float scales[])
 
 STAGE (lerp_native, const float scales[])
 
 STAGE (lerp_u8, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (lerp_565, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (emboss, const SkRasterPipeline_EmbossCtx *ctx)
 
 STAGE (byte_tables, const SkRasterPipeline_TablesCtx *tables)
 
SI F strip_sign (F x, U32 *sign)
 
SI F apply_sign (F x, U32 sign)
 
 STAGE (parametric, const skcms_TransferFunction *ctx)
 
 STAGE (gamma_, const float *G)
 
 STAGE (PQish, const skcms_TransferFunction *ctx)
 
 STAGE (HLGish, const skcms_TransferFunction *ctx)
 
 STAGE (HLGinvish, const skcms_TransferFunction *ctx)
 
 STAGE (load_a8, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_a8_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_a8, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_a8, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (store_r8, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_565, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_565_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_565, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_565, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_4444, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_4444_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_4444, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_4444, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_8888, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_8888_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_8888, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_8888, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_rg88, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_rg88_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_rg88, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_rg88, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_a16, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_a16_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_a16, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_a16, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_rg1616, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_rg1616_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_rg1616, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_rg1616, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_16161616, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_16161616_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_16161616, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_16161616, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_10x6, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_10x6_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_10x6, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_10x6, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_1010102, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_1010102_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_1010102_xr, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_1010102_xr_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_1010102, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (gather_1010102_xr, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (gather_10101010_xr, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (load_10101010_xr, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_10101010_xr_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (store_10101010_xr, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (store_1010102, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (store_1010102_xr, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_f16, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_f16_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_f16, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_f16, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_af16, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_af16_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_af16, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_af16, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_rgf16, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_rgf16_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_rgf16, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_rgf16, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_f32, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_f32_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_f32, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_f32, const SkRasterPipeline_MemoryCtx *ctx)
 
SI F exclusive_repeat (F v, const SkRasterPipeline_TileCtx *ctx)
 
SI F exclusive_mirror (F v, const SkRasterPipeline_TileCtx *ctx)
 
 STAGE (repeat_x, const SkRasterPipeline_TileCtx *ctx)
 
 STAGE (repeat_y, const SkRasterPipeline_TileCtx *ctx)
 
 STAGE (mirror_x, const SkRasterPipeline_TileCtx *ctx)
 
 STAGE (mirror_y, const SkRasterPipeline_TileCtx *ctx)
 
 STAGE (clamp_x_1, NoCtx)
 
 STAGE (repeat_x_1, NoCtx)
 
 STAGE (mirror_x_1, NoCtx)
 
 STAGE (clamp_x_and_y, const SkRasterPipeline_CoordClampCtx *ctx)
 
 STAGE (decal_x, SkRasterPipeline_DecalTileCtx *ctx)
 
 STAGE (decal_y, SkRasterPipeline_DecalTileCtx *ctx)
 
 STAGE (decal_x_and_y, SkRasterPipeline_DecalTileCtx *ctx)
 
 STAGE (check_decal_mask, SkRasterPipeline_DecalTileCtx *ctx)
 
 STAGE (alpha_to_gray, NoCtx)
 
 STAGE (alpha_to_gray_dst, NoCtx)
 
 STAGE (alpha_to_red, NoCtx)
 
 STAGE (alpha_to_red_dst, NoCtx)
 
 STAGE (bt709_luminance_or_luma_to_alpha, NoCtx)
 
 STAGE (bt709_luminance_or_luma_to_rgb, NoCtx)
 
 STAGE (matrix_translate, const float *m)
 
 STAGE (matrix_scale_translate, const float *m)
 
 STAGE (matrix_2x3, const float *m)
 
 STAGE (matrix_3x3, const float *m)
 
 STAGE (matrix_3x4, const float *m)
 
 STAGE (matrix_4x5, const float *m)
 
 STAGE (matrix_4x3, const float *m)
 
 STAGE (matrix_perspective, const float *m)
 
SI void gradient_lookup (const SkRasterPipeline_GradientCtx *c, U32 idx, F t, F *r, F *g, F *b, F *a)
 
 STAGE (evenly_spaced_gradient, const SkRasterPipeline_GradientCtx *c)
 
 STAGE (gradient, const SkRasterPipeline_GradientCtx *c)
 
 STAGE (evenly_spaced_2_stop_gradient, const SkRasterPipeline_EvenlySpaced2StopGradientCtx *c)
 
 STAGE (xy_to_unit_angle, NoCtx)
 
 STAGE (xy_to_radius, NoCtx)
 
 STAGE (negate_x, NoCtx)
 
 STAGE (xy_to_2pt_conical_strip, const SkRasterPipeline_2PtConicalCtx *ctx)
 
 STAGE (xy_to_2pt_conical_focal_on_circle, NoCtx)
 
 STAGE (xy_to_2pt_conical_well_behaved, const SkRasterPipeline_2PtConicalCtx *ctx)
 
 STAGE (xy_to_2pt_conical_greater, const SkRasterPipeline_2PtConicalCtx *ctx)
 
 STAGE (xy_to_2pt_conical_smaller, const SkRasterPipeline_2PtConicalCtx *ctx)
 
 STAGE (alter_2pt_conical_compensate_focal, const SkRasterPipeline_2PtConicalCtx *ctx)
 
 STAGE (alter_2pt_conical_unswap, NoCtx)
 
 STAGE (mask_2pt_conical_nan, SkRasterPipeline_2PtConicalCtx *c)
 
 STAGE (mask_2pt_conical_degenerates, SkRasterPipeline_2PtConicalCtx *c)
 
 STAGE (apply_vector_mask, const uint32_t *ctx)
 
SI void save_xy (F *r, F *g, SkRasterPipeline_SamplerCtx *c)
 
 STAGE (accumulate, const SkRasterPipeline_SamplerCtx *c)
 
template<int kScale>
SI void bilinear_x (SkRasterPipeline_SamplerCtx *ctx, F *x)
 
template<int kScale>
SI void bilinear_y (SkRasterPipeline_SamplerCtx *ctx, F *y)
 
 STAGE (bilinear_setup, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bilinear_nx, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bilinear_px, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bilinear_ny, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bilinear_py, SkRasterPipeline_SamplerCtx *ctx)
 
SI F bicubic_wts (F t, float A, float B, float C, float D)
 
template<int kScale>
SI void bicubic_x (SkRasterPipeline_SamplerCtx *ctx, F *x)
 
template<int kScale>
SI void bicubic_y (SkRasterPipeline_SamplerCtx *ctx, F *y)
 
 STAGE (bicubic_setup, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bicubic_n3x, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bicubic_n1x, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bicubic_p1x, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bicubic_p3x, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bicubic_n3y, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bicubic_n1y, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bicubic_p1y, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bicubic_p3y, SkRasterPipeline_SamplerCtx *ctx)
 
SI F compute_perlin_vector (U32 sample, F x, F y)
 
 STAGE (perlin_noise, SkRasterPipeline_PerlinNoiseCtx *ctx)
 
 STAGE (mipmap_linear_init, SkRasterPipeline_MipmapCtx *ctx)
 
 STAGE (mipmap_linear_update, SkRasterPipeline_MipmapCtx *ctx)
 
 STAGE (mipmap_linear_finish, SkRasterPipeline_MipmapCtx *ctx)
 
 STAGE (callback, SkRasterPipeline_CallbackCtx *c)
 
 STAGE_TAIL (set_base_pointer, std::byte *p)
 
 STAGE_TAIL (init_lane_masks, SkRasterPipeline_InitLaneMasksCtx *ctx)
 
 STAGE_TAIL (store_device_xy01, F *dst)
 
 STAGE_TAIL (exchange_src, F *rgba)
 
 STAGE_TAIL (load_condition_mask, F *ctx)
 
 STAGE_TAIL (store_condition_mask, F *ctx)
 
 STAGE_TAIL (merge_condition_mask, I32 *ptr)
 
 STAGE_TAIL (merge_inv_condition_mask, I32 *ptr)
 
 STAGE_TAIL (load_loop_mask, F *ctx)
 
 STAGE_TAIL (store_loop_mask, F *ctx)
 
 STAGE_TAIL (mask_off_loop_mask, NoCtx)
 
 STAGE_TAIL (reenable_loop_mask, I32 *ptr)
 
 STAGE_TAIL (merge_loop_mask, I32 *ptr)
 
 STAGE_TAIL (continue_op, I32 *continueMask)
 
 STAGE_TAIL (case_op, SkRasterPipeline_CaseOpCtx *packed)
 
 STAGE_TAIL (load_return_mask, F *ctx)
 
 STAGE_TAIL (store_return_mask, F *ctx)
 
 STAGE_TAIL (mask_off_return_mask, NoCtx)
 
 STAGE_BRANCH (branch_if_all_lanes_active, SkRasterPipeline_BranchIfAllLanesActiveCtx *ctx)
 
 STAGE_BRANCH (branch_if_any_lanes_active, SkRasterPipeline_BranchCtx *ctx)
 
 STAGE_BRANCH (branch_if_no_lanes_active, SkRasterPipeline_BranchCtx *ctx)
 
 STAGE_BRANCH (jump, SkRasterPipeline_BranchCtx *ctx)
 
 STAGE_BRANCH (branch_if_no_active_lanes_eq, SkRasterPipeline_BranchIfEqualCtx *ctx)
 
 STAGE_TAIL (trace_line, SkRasterPipeline_TraceLineCtx *ctx)
 
 STAGE_TAIL (trace_enter, SkRasterPipeline_TraceFuncCtx *ctx)
 
 STAGE_TAIL (trace_exit, SkRasterPipeline_TraceFuncCtx *ctx)
 
 STAGE_TAIL (trace_scope, SkRasterPipeline_TraceScopeCtx *ctx)
 
 STAGE_TAIL (trace_var, SkRasterPipeline_TraceVarCtx *ctx)
 
 STAGE_TAIL (copy_uniform, SkRasterPipeline_UniformCtx *ctx)
 
 STAGE_TAIL (copy_2_uniforms, SkRasterPipeline_UniformCtx *ctx)
 
 STAGE_TAIL (copy_3_uniforms, SkRasterPipeline_UniformCtx *ctx)
 
 STAGE_TAIL (copy_4_uniforms, SkRasterPipeline_UniformCtx *ctx)
 
 STAGE_TAIL (copy_constant, SkRasterPipeline_ConstantCtx *packed)
 
 STAGE_TAIL (splat_2_constants, SkRasterPipeline_ConstantCtx *packed)
 
 STAGE_TAIL (splat_3_constants, SkRasterPipeline_ConstantCtx *packed)
 
 STAGE_TAIL (splat_4_constants, SkRasterPipeline_ConstantCtx *packed)
 
template<int NumSlots>
SI void copy_n_slots_unmasked_fn (SkRasterPipeline_BinaryOpCtx *packed, std::byte *base)
 
 STAGE_TAIL (copy_slot_unmasked, SkRasterPipeline_BinaryOpCtx *packed)
 
 STAGE_TAIL (copy_2_slots_unmasked, SkRasterPipeline_BinaryOpCtx *packed)
 
 STAGE_TAIL (copy_3_slots_unmasked, SkRasterPipeline_BinaryOpCtx *packed)
 
 STAGE_TAIL (copy_4_slots_unmasked, SkRasterPipeline_BinaryOpCtx *packed)
 
template<int NumSlots>
SI void copy_n_immutable_unmasked_fn (SkRasterPipeline_BinaryOpCtx *packed, std::byte *base)
 
 STAGE_TAIL (copy_immutable_unmasked, SkRasterPipeline_BinaryOpCtx *packed)
 
 STAGE_TAIL (copy_2_immutables_unmasked, SkRasterPipeline_BinaryOpCtx *packed)
 
 STAGE_TAIL (copy_3_immutables_unmasked, SkRasterPipeline_BinaryOpCtx *packed)
 
 STAGE_TAIL (copy_4_immutables_unmasked, SkRasterPipeline_BinaryOpCtx *packed)
 
template<int NumSlots>
SI void copy_n_slots_masked_fn (SkRasterPipeline_BinaryOpCtx *packed, std::byte *base, I32 mask)
 
 STAGE_TAIL (copy_slot_masked, SkRasterPipeline_BinaryOpCtx *packed)
 
 STAGE_TAIL (copy_2_slots_masked, SkRasterPipeline_BinaryOpCtx *packed)
 
 STAGE_TAIL (copy_3_slots_masked, SkRasterPipeline_BinaryOpCtx *packed)
 
 STAGE_TAIL (copy_4_slots_masked, SkRasterPipeline_BinaryOpCtx *packed)
 
template<int LoopCount, typename OffsetType >
SI void shuffle_fn (std::byte *ptr, OffsetType *offsets, int numSlots)
 
template<int N>
SI void small_swizzle_fn (SkRasterPipeline_SwizzleCtx *packed, std::byte *base)
 
 STAGE_TAIL (swizzle_1, SkRasterPipeline_SwizzleCtx *packed)
 
 STAGE_TAIL (swizzle_2, SkRasterPipeline_SwizzleCtx *packed)
 
 STAGE_TAIL (swizzle_3, SkRasterPipeline_SwizzleCtx *packed)
 
 STAGE_TAIL (swizzle_4, SkRasterPipeline_SwizzleCtx *packed)
 
 STAGE_TAIL (shuffle, SkRasterPipeline_ShuffleCtx *ctx)
 
template<int NumSlots>
SI void swizzle_copy_masked_fn (I32 *dst, const I32 *src, uint16_t *offsets, I32 mask)
 
 STAGE_TAIL (swizzle_copy_slot_masked, SkRasterPipeline_SwizzleCopyCtx *ctx)
 
 STAGE_TAIL (swizzle_copy_2_slots_masked, SkRasterPipeline_SwizzleCopyCtx *ctx)
 
 STAGE_TAIL (swizzle_copy_3_slots_masked, SkRasterPipeline_SwizzleCopyCtx *ctx)
 
 STAGE_TAIL (swizzle_copy_4_slots_masked, SkRasterPipeline_SwizzleCopyCtx *ctx)
 
 STAGE_TAIL (copy_from_indirect_unmasked, SkRasterPipeline_CopyIndirectCtx *ctx)
 
 STAGE_TAIL (copy_from_indirect_uniform_unmasked, SkRasterPipeline_CopyIndirectCtx *ctx)
 
 STAGE_TAIL (copy_to_indirect_masked, SkRasterPipeline_CopyIndirectCtx *ctx)
 
 STAGE_TAIL (swizzle_copy_to_indirect_masked, SkRasterPipeline_SwizzleCopyIndirectCtx *ctx)
 
template<typename T , void(*ApplyFn)(T *)>
SI void apply_adjacent_unary (T *dst, T *end)
 
template<typename T >
SI void cast_to_float_from_fn (T *dst)
 
SI void cast_to_int_from_fn (F *dst)
 
SI void cast_to_uint_from_fn (F *dst)
 
SI void abs_fn (I32 *dst)
 
SI void floor_fn (F *dst)
 
SI void ceil_fn (F *dst)
 
SI void invsqrt_fn (F *dst)
 
 DECLARE_UNARY_INT (cast_to_float_from) DECLARE_UNARY_UINT(cast_to_float_from) STAGE_TAIL(sin_float, F *dst)
 
 STAGE_TAIL (cos_float, F *dst)
 
 STAGE_TAIL (tan_float, F *dst)
 
 STAGE_TAIL (asin_float, F *dst)
 
 STAGE_TAIL (acos_float, F *dst)
 
 STAGE_TAIL (atan_float, F *dst)
 
 STAGE_TAIL (sqrt_float, F *dst)
 
 STAGE_TAIL (exp_float, F *dst)
 
 STAGE_TAIL (exp2_float, F *dst)
 
 STAGE_TAIL (log_float, F *dst)
 
 STAGE_TAIL (log2_float, F *dst)
 
 STAGE_TAIL (inverse_mat2, F *dst)
 
 STAGE_TAIL (inverse_mat3, F *dst)
 
 STAGE_TAIL (inverse_mat4, F *dst)
 
template<typename T , void(*ApplyFn)(T *, T *)>
SI void apply_adjacent_binary (T *dst, T *src)
 
template<typename T , void(*ApplyFn)(T *, T *)>
SI void apply_adjacent_binary_packed (SkRasterPipeline_BinaryOpCtx *packed, std::byte *base)
 
template<int N, typename V , typename S , void(*ApplyFn)(V *, V *)>
SI void apply_binary_immediate (SkRasterPipeline_ConstantCtx *packed, std::byte *base)
 
template<typename T >
SI void add_fn (T *dst, T *src)
 
template<typename T >
SI void sub_fn (T *dst, T *src)
 
template<typename T >
SI void mul_fn (T *dst, T *src)
 
template<typename T >
SI void div_fn (T *dst, T *src)
 
SI void bitwise_and_fn (I32 *dst, I32 *src)
 
SI void bitwise_or_fn (I32 *dst, I32 *src)
 
SI void bitwise_xor_fn (I32 *dst, I32 *src)
 
template<typename T >
SI void max_fn (T *dst, T *src)
 
template<typename T >
SI void min_fn (T *dst, T *src)
 
template<typename T >
SI void cmplt_fn (T *dst, T *src)
 
template<typename T >
SI void cmple_fn (T *dst, T *src)
 
template<typename T >
SI void cmpeq_fn (T *dst, T *src)
 
template<typename T >
SI void cmpne_fn (T *dst, T *src)
 
SI void atan2_fn (F *dst, F *src)
 
SI void pow_fn (F *dst, F *src)
 
SI void mod_fn (F *dst, F *src)
 
 DECLARE_BINARY_FLOAT (add) DECLARE_BINARY_INT(add) DECLARE_BINARY_FLOAT(sub) DECLARE_BINARY_INT(sub) DECLARE_BINARY_FLOAT(mul) DECLARE_BINARY_INT(mul) DECLARE_BINARY_FLOAT(div) DECLARE_BINARY_INT(div) DECLARE_BINARY_UINT(div) DECLARE_BINARY_FLOAT(min) DECLARE_BINARY_INT(min) DECLARE_BINARY_UINT(min) DECLARE_BINARY_FLOAT(max) DECLARE_BINARY_INT(max) DECLARE_BINARY_UINT(max) DECLARE_BINARY_FLOAT(cmplt) DECLARE_BINARY_INT(cmplt) DECLARE_BINARY_UINT(cmplt) DECLARE_BINARY_FLOAT(cmple) DECLARE_BINARY_INT(cmple) DECLARE_BINARY_UINT(cmple) DECLARE_BINARY_FLOAT(cmpeq) DECLARE_BINARY_INT(cmpeq) DECLARE_BINARY_FLOAT(cmpne) DECLARE_BINARY_INT(cmpne) DECLARE_IMM_BINARY_FLOAT(add) DECLARE_IMM_BINARY_INT(add) DECLARE_IMM_BINARY_FLOAT(mul) DECLARE_IMM_BINARY_INT(mul) DECLARE_IMM_BINARY_FLOAT(cmplt) DECLARE_IMM_BINARY_INT(cmplt) DECLARE_IMM_BINARY_UINT(cmplt) DECLARE_IMM_BINARY_FLOAT(cmple) DECLARE_IMM_BINARY_INT(cmple) DECLARE_IMM_BINARY_UINT(cmple) DECLARE_IMM_BINARY_FLOAT(cmpeq) DECLARE_IMM_BINARY_INT(cmpeq) DECLARE_IMM_BINARY_FLOAT(cmpne) DECLARE_IMM_BINARY_INT(cmpne) STAGE_TAIL(dot_2_floats, F *dst)
 
 STAGE_TAIL (dot_3_floats, F *dst)
 
 STAGE_TAIL (dot_4_floats, F *dst)
 
template<int N>
SI void matrix_multiply (SkRasterPipeline_MatrixMultiplyCtx *packed, std::byte *base)
 
 STAGE_TAIL (matrix_multiply_2, SkRasterPipeline_MatrixMultiplyCtx *packed)
 
 STAGE_TAIL (matrix_multiply_3, SkRasterPipeline_MatrixMultiplyCtx *packed)
 
 STAGE_TAIL (matrix_multiply_4, SkRasterPipeline_MatrixMultiplyCtx *packed)
 
 STAGE_TAIL (refract_4_floats, F *dst)
 
template<typename T , void(*ApplyFn)(T *, T *, T *)>
SI void apply_adjacent_ternary (T *dst, T *src0, T *src1)
 
template<typename T , void(*ApplyFn)(T *, T *, T *)>
SI void apply_adjacent_ternary_packed (SkRasterPipeline_TernaryOpCtx *packed, std::byte *base)
 
SI void mix_fn (F *a, F *x, F *y)
 
SI void mix_fn (I32 *a, I32 *x, I32 *y)
 
SI void smoothstep_fn (F *edge0, F *edge1, F *x)
 
 STAGE (gauss_a_to_rgba, NoCtx)
 
 STAGE (bilerp_clamp_8888, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (bicubic_clamp_8888, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (swizzle, void *ctx)
 
constexpr size_t raster_pipeline_lowp_stride ()
 
constexpr size_t raster_pipeline_highp_stride ()
 

Variables

static constexpr void(* S32_alpha_D32_filter_DXDY )(const SkBitmapProcState &, const uint32_t *, int, SkPMColor *) = nullptr
 
static constexpr F F0 = F_(0.0f)
 
static constexpr F F1 = F_(1.0f)
 
static constexpr size_t N = sizeof(F) / sizeof(float)
 
F *dst { *dst = sin_(*dst) }
 

Typedef Documentation

◆ F

using SK_OPTS_NS::F = typedef float

Definition at line 134 of file SkRasterPipeline_opts.h.

◆ I32

using SK_OPTS_NS::I32 = typedef int32_t

Definition at line 135 of file SkRasterPipeline_opts.h.

◆ Stage

using SK_OPTS_NS::Stage = typedef void(ABI*)(Params*, SkRasterPipelineStage* program, F r, F g, F b, F a)

Definition at line 1524 of file SkRasterPipeline_opts.h.

◆ U16

using SK_OPTS_NS::U16 = typedef uint16_t

Definition at line 138 of file SkRasterPipeline_opts.h.

◆ U32

using SK_OPTS_NS::U32 = typedef uint32_t

Definition at line 137 of file SkRasterPipeline_opts.h.

◆ U64

using SK_OPTS_NS::U64 = typedef uint64_t

Definition at line 136 of file SkRasterPipeline_opts.h.

◆ U8

using SK_OPTS_NS::U8 = typedef uint8_t

Definition at line 139 of file SkRasterPipeline_opts.h.

Function Documentation

◆ abs_() [1/2]

SI F SK_OPTS_NS::abs_ ( F  v)

Definition at line 150 of file SkRasterPipeline_opts.h.

150{ return fabsf(v); }

◆ abs_() [2/2]

SI I32 SK_OPTS_NS::abs_ ( I32  v)

Definition at line 151 of file SkRasterPipeline_opts.h.

151{ return v < 0 ? -v : v; }

◆ abs_fn()

SI void SK_OPTS_NS::abs_fn ( I32 *  dst)

Definition at line 4399 of file SkRasterPipeline_opts.h.

4399 {
4400 *dst = abs_(*dst);
4401}

◆ acos_()

SI F SK_OPTS_NS::acos_ ( F  x)

Definition at line 1957 of file SkRasterPipeline_opts.h.

1957 {
1958 return SK_FloatPI/2 - asin_(x);
1959}
constexpr float SK_FloatPI
double x

◆ add_fn()

template<typename T >
SI void SK_OPTS_NS::add_fn ( T *  dst,
T *  src 
)

Definition at line 4570 of file SkRasterPipeline_opts.h.

4570 {
4571 *dst += *src;
4572}

◆ all()

SI bool SK_OPTS_NS::all ( I32  c)

Definition at line 169 of file SkRasterPipeline_opts.h.

169{ return c != 0; }

◆ alpha_coverage_from_rgb_coverage()

SI F SK_OPTS_NS::alpha_coverage_from_rgb_coverage ( F  a,
F  da,
F  cr,
F  cg,
F  cb 
)

Definition at line 2616 of file SkRasterPipeline_opts.h.

2616 {
2617 return if_then_else(a < da, min(cr, min(cg,cb))
2618 , max(cr, max(cg,cb)));
2619}
SI T if_then_else(C cond, T t, T e)
struct MyStruct a[10]
static float max(float r, float g, float b)
Definition hsl.cpp:49
static float min(float r, float g, float b)
Definition hsl.cpp:48

◆ any()

SI bool SK_OPTS_NS::any ( I32  c)

Definition at line 168 of file SkRasterPipeline_opts.h.

168{ return c != 0; }

◆ apply_adjacent_binary()

template<typename T , void(*ApplyFn)(T *, T *)>
SI void SK_OPTS_NS::apply_adjacent_binary ( T *  dst,
T *  src 
)

Definition at line 4540 of file SkRasterPipeline_opts.h.

4540 {
4541 T* end = src;
4542 do {
4543 ApplyFn(dst, src);
4544 dst += 1;
4545 src += 1;
4546 } while (dst != end);
4547}
glong glong end
#define T

◆ apply_adjacent_binary_packed()

template<typename T , void(*ApplyFn)(T *, T *)>
SI void SK_OPTS_NS::apply_adjacent_binary_packed ( SkRasterPipeline_BinaryOpCtx *  packed,
std::byte *  base 
)

Definition at line 4550 of file SkRasterPipeline_opts.h.

4550 {
4551 auto ctx = SkRPCtxUtils::Unpack(packed);
4552 std::byte* dst = base + ctx.dst;
4553 std::byte* src = base + ctx.src;
4554 apply_adjacent_binary<T, ApplyFn>((T*)dst, (T*)src);
4555}
static UnpackedType< T > Unpack(const T *ctx)

◆ apply_adjacent_ternary()

template<typename T , void(*ApplyFn)(T *, T *, T *)>
SI void SK_OPTS_NS::apply_adjacent_ternary ( T *  dst,
T *  src0,
T *  src1 
)

Definition at line 4869 of file SkRasterPipeline_opts.h.

4869 {
4870 int count = src0 - dst;
4871#if !defined(JUMPER_IS_SCALAR)
4872 SK_ASSUME(count >= 1);
4873#endif
4874
4875 for (int index = 0; index < count; ++index) {
4876 ApplyFn(dst, src0, src1);
4877 dst += 1;
4878 src0 += 1;
4879 src1 += 1;
4880 }
4881}
int count
#define SK_ASSUME(cond)
Definition SkAssert.h:44

◆ apply_adjacent_ternary_packed()

template<typename T , void(*ApplyFn)(T *, T *, T *)>
SI void SK_OPTS_NS::apply_adjacent_ternary_packed ( SkRasterPipeline_TernaryOpCtx *  packed,
std::byte *  base 
)

Definition at line 4884 of file SkRasterPipeline_opts.h.

4884 {
4885 auto ctx = SkRPCtxUtils::Unpack(packed);
4886 std::byte* dst = base + ctx.dst;
4887 std::byte* src0 = dst + ctx.delta;
4888 std::byte* src1 = src0 + ctx.delta;
4889 apply_adjacent_ternary<T, ApplyFn>((T*)dst, (T*)src0, (T*)src1);
4890}

◆ apply_adjacent_unary()

template<typename T , void(*ApplyFn)(T *)>
SI void SK_OPTS_NS::apply_adjacent_unary ( T *  dst,
T *  end 
)

Definition at line 4368 of file SkRasterPipeline_opts.h.

4368 {
4369 do {
4370 ApplyFn(dst);
4371 dst += 1;
4372 } while (dst != end);
4373}

◆ apply_binary_immediate()

template<int N, typename V , typename S , void(*ApplyFn)(V *, V *)>
SI void SK_OPTS_NS::apply_binary_immediate ( SkRasterPipeline_ConstantCtx *  packed,
std::byte *  base 
)

Definition at line 4558 of file SkRasterPipeline_opts.h.

4558 {
4559 auto ctx = SkRPCtxUtils::Unpack(packed);
4560 V* dst = (V*)(base + ctx.dst); // get a pointer to the destination
4561 S scalar = sk_bit_cast<S>(ctx.value); // bit-pun the constant value as desired
4562 V src = scalar - V(); // broadcast the constant value into a vector
4563 SK_UNROLL for (int index = 0; index < N; ++index) {
4564 ApplyFn(dst, &src); // perform the operation
4565 dst += 1;
4566 }
4567}
#define SK_UNROLL
#define N
Definition beziers.cpp:19
T __attribute__((ext_vector_type(N))) V
#define V(name)
Definition raw_object.h:124

◆ apply_sign()

SI F SK_OPTS_NS::apply_sign ( F  x,
U32  sign 
)

Definition at line 2726 of file SkRasterPipeline_opts.h.

2726 {
2727 return sk_bit_cast<F>(sign | sk_bit_cast<U32>(x));
2728}
static int sign(SkScalar x)
Definition SkPath.cpp:2141

◆ approx_atan_unit()

SI F SK_OPTS_NS::approx_atan_unit ( F  x)

Definition at line 1915 of file SkRasterPipeline_opts.h.

1915 {
1916 // y = 0.14130025741326729 x⁴
1917 // - 0.34312835980675116 x³
1918 // - 0.016172900528248768 x²
1919 // + 1.00376969762003850 x
1920 // - 0.00014758242182738969
1921 const float c4 = 0.14130025741326729f;
1922 const float c3 = -0.34312835980675116f;
1923 const float c2 = -0.016172900528248768f;
1924 const float c1 = 1.0037696976200385f;
1925 const float c0 = -0.00014758242182738969f;
1926 return mad(x, mad(x, mad(x, mad(x, c4, c3), c2), c1), c0);
1927}
SI F mad(F f, F m, F a)

◆ approx_exp()

SI F SK_OPTS_NS::approx_exp ( F  x)

Definition at line 1386 of file SkRasterPipeline_opts.h.

1386 {
1387 const float log2_e = 1.4426950408889634074f;
1388 return approx_pow2(log2_e * x);
1389}

◆ approx_log()

SI F SK_OPTS_NS::approx_log ( F  x)

Definition at line 1369 of file SkRasterPipeline_opts.h.

1369 {
1370 const float ln2 = 0.69314718f;
1371 return ln2 * approx_log2(x);
1372}
SI F approx_log2(F x)

◆ approx_log2()

SI F SK_OPTS_NS::approx_log2 ( F  x)

Definition at line 1359 of file SkRasterPipeline_opts.h.

1359 {
1360 // e - 127 is a fair approximation of log2(x) in its own right...
1361 F e = cast(sk_bit_cast<U32>(x)) * (1.0f / (1<<23));
1362
1363 // ... but using the mantissa to refine its error is _much_ better.
1364 F m = sk_bit_cast<F>((sk_bit_cast<U32>(x) & 0x007fffff) | 0x3f000000);
1365
1366 return nmad(m, 1.498030302f, e - 124.225514990f) - 1.725879990f / (0.3520887068f + m);
1367}
SI D cast(const S &v)
SI F nmad(F f, F m, F a)
Definition SkMD5.cpp:120

◆ approx_pow2()

SI F SK_OPTS_NS::approx_pow2 ( F  x)

Definition at line 1374 of file SkRasterPipeline_opts.h.

1374 {
1375 constexpr float kInfinityBits = 0x7f800000;
1376
1377 F f = fract(x);
1378 F approx = nmad(f, 1.490129070f, x + 121.274057500f);
1379 approx += 27.728023300f / (4.84252568f - f);
1380 approx *= 1.0f * (1<<23);
1381 approx = min(max(approx, F0), F_(kInfinityBits)); // guard against underflow/overflow
1382
1383 return sk_bit_cast<F>(round(approx));
1384}
static void round(SkPoint *p)
constexpr auto F_
static constexpr F F0

◆ approx_powf()

SI F SK_OPTS_NS::approx_powf ( F  x,
F  y 
)

Definition at line 1391 of file SkRasterPipeline_opts.h.

1391 {
1392 return if_then_else((x == 0)|(x == 1), x
1393 , approx_pow2(approx_log2(x) * y));
1394}
double y

◆ asin_()

SI F SK_OPTS_NS::asin_ ( F  x)

Definition at line 1944 of file SkRasterPipeline_opts.h.

1944 {
1945 I32 neg = (x < 0.0f);
1946 x = if_then_else(neg, -x, x);
1947 const float c3 = -0.0187293f;
1948 const float c2 = 0.0742610f;
1949 const float c1 = -0.2121144f;
1950 const float c0 = 1.5707288f;
1951 F poly = mad(x, mad(x, mad(x, c3, c2), c1), c0);
1952 x = nmad(sqrt_(1 - x), poly, SK_FloatPI/2);
1953 x = if_then_else(neg, -x, x);
1954 return x;
1955}

◆ atan2_()

SI F SK_OPTS_NS::atan2_ ( F  y0,
F  x0 
)

Definition at line 1965 of file SkRasterPipeline_opts.h.

1965 {
1966 I32 flip = (abs_(y0) > abs_(x0));
1967 F y = if_then_else(flip, x0, y0);
1968 F x = if_then_else(flip, y0, x0);
1969 F arg = y/x;
1970
1971 I32 neg = (arg < 0.0f);
1972 arg = if_then_else(neg, -arg, arg);
1973
1974 F r = approx_atan_unit(arg);
1975 r = if_then_else(flip, SK_FloatPI/2 - r, r);
1976 r = if_then_else(neg, -r, r);
1977
1978 // handle quadrant distinctions
1979 r = if_then_else((y0 >= 0) & (x0 < 0), r + SK_FloatPI, r);
1980 r = if_then_else((y0 < 0) & (x0 <= 0), r - SK_FloatPI, r);
1981 // Note: we don't try to handle 0,0 or infinities
1982 return r;
1983}

◆ atan2_fn()

SI void SK_OPTS_NS::atan2_fn ( F *  dst,
F *  src 
)

Definition at line 4644 of file SkRasterPipeline_opts.h.

4644 {
4645 *dst = atan2_(*dst, *src);
4646}
SI F atan2_(F y0, F x0)

◆ atan_()

SI F SK_OPTS_NS::atan_ ( F  x)

Definition at line 1930 of file SkRasterPipeline_opts.h.

1930 {
1931 I32 neg = (x < 0.0f);
1932 x = if_then_else(neg, -x, x);
1933 I32 flip = (x > 1.0f);
1934 x = if_then_else(flip, 1/x, x);
1935 x = approx_atan_unit(x);
1936 x = if_then_else(flip, SK_FloatPI/2 - x, x);
1937 x = if_then_else(neg, -x, x);
1938 return x;
1939}

◆ bicubic_wts()

SI F SK_OPTS_NS::bicubic_wts ( F  t,
float  A,
float  B,
float  C,
float  D 
)

Definition at line 3634 of file SkRasterPipeline_opts.h.

3634 {
3635 return mad(t, mad(t, mad(t, D, C), B), A);
3636}

◆ bicubic_x()

template<int kScale>
SI void SK_OPTS_NS::bicubic_x ( SkRasterPipeline_SamplerCtx *  ctx,
F *  x 
)

Definition at line 3639 of file SkRasterPipeline_opts.h.

3639 {
3640 *x = sk_unaligned_load<F>(ctx->x) + (kScale * 0.5f);
3641
3642 F scalex;
3643 if (kScale == -3) { scalex = sk_unaligned_load<F>(ctx->wx[0]); }
3644 if (kScale == -1) { scalex = sk_unaligned_load<F>(ctx->wx[1]); }
3645 if (kScale == +1) { scalex = sk_unaligned_load<F>(ctx->wx[2]); }
3646 if (kScale == +3) { scalex = sk_unaligned_load<F>(ctx->wx[3]); }
3647 sk_unaligned_store(ctx->scalex, scalex);
3648}
static SK_ALWAYS_INLINE void SK_FP_SAFE_ABI sk_unaligned_store(P *ptr, T val)
Definition SkUtils.h:61
float scalex[SkRasterPipeline_kMaxStride_highp]
float x[SkRasterPipeline_kMaxStride_highp]
float wx[4][SkRasterPipeline_kMaxStride_highp]
static constexpr int kScale

◆ bicubic_y()

template<int kScale>
SI void SK_OPTS_NS::bicubic_y ( SkRasterPipeline_SamplerCtx *  ctx,
F *  y 
)

Definition at line 3650 of file SkRasterPipeline_opts.h.

3650 {
3651 *y = sk_unaligned_load<F>(ctx->y) + (kScale * 0.5f);
3652
3653 F scaley;
3654 if (kScale == -3) { scaley = sk_unaligned_load<F>(ctx->wy[0]); }
3655 if (kScale == -1) { scaley = sk_unaligned_load<F>(ctx->wy[1]); }
3656 if (kScale == +1) { scaley = sk_unaligned_load<F>(ctx->wy[2]); }
3657 if (kScale == +3) { scaley = sk_unaligned_load<F>(ctx->wy[3]); }
3658 sk_unaligned_store(ctx->scaley, scaley);
3659}
float wy[4][SkRasterPipeline_kMaxStride_highp]
float scaley[SkRasterPipeline_kMaxStride_highp]
float y[SkRasterPipeline_kMaxStride_highp]

◆ bilinear_x()

template<int kScale>
SI void SK_OPTS_NS::bilinear_x ( SkRasterPipeline_SamplerCtx *  ctx,
F *  x 
)

Definition at line 3596 of file SkRasterPipeline_opts.h.

3596 {
3597 *x = sk_unaligned_load<F>(ctx->x) + (kScale * 0.5f);
3598 F fx = sk_unaligned_load<F>(ctx->fx);
3599
3600 F scalex;
3601 if (kScale == -1) { scalex = 1.0f - fx; }
3602 if (kScale == +1) { scalex = fx; }
3603 sk_unaligned_store(ctx->scalex, scalex);
3604}
float fx[SkRasterPipeline_kMaxStride_highp]

◆ bilinear_y()

template<int kScale>
SI void SK_OPTS_NS::bilinear_y ( SkRasterPipeline_SamplerCtx *  ctx,
F *  y 
)

Definition at line 3606 of file SkRasterPipeline_opts.h.

3606 {
3607 *y = sk_unaligned_load<F>(ctx->y) + (kScale * 0.5f);
3608 F fy = sk_unaligned_load<F>(ctx->fy);
3609
3610 F scaley;
3611 if (kScale == -1) { scaley = 1.0f - fy; }
3612 if (kScale == +1) { scaley = fy; }
3613 sk_unaligned_store(ctx->scaley, scaley);
3614}
float fy[SkRasterPipeline_kMaxStride_highp]

◆ bitwise_and_fn()

SI void SK_OPTS_NS::bitwise_and_fn ( I32 *  dst,
I32 *  src 
)

Definition at line 4594 of file SkRasterPipeline_opts.h.

4594 {
4595 *dst &= *src;
4596}

◆ bitwise_or_fn()

SI void SK_OPTS_NS::bitwise_or_fn ( I32 *  dst,
I32 *  src 
)

Definition at line 4598 of file SkRasterPipeline_opts.h.

4598 {
4599 *dst |= *src;
4600}

◆ bitwise_xor_fn()

SI void SK_OPTS_NS::bitwise_xor_fn ( I32 *  dst,
I32 *  src 
)

Definition at line 4602 of file SkRasterPipeline_opts.h.

4602 {
4603 *dst ^= *src;
4604}

◆ BLEND_MODE() [1/23]

SK_OPTS_NS::BLEND_MODE ( clear  )

Definition at line 2175 of file SkRasterPipeline_opts.h.

2175{ return F0; }

◆ BLEND_MODE() [2/23]

SK_OPTS_NS::BLEND_MODE ( colorburn  )

Definition at line 2208 of file SkRasterPipeline_opts.h.

2208 {
2209 return if_then_else(d == da, d + s*inv(da),
2210 if_then_else(s == 0, /* s + */ d*inv(sa),
2211 sa*(da - min(da, (da-d)*sa*rcp_fast(s))) + s*inv(da) + d*inv(sa)));
2212}
static SkM44 inv(const SkM44 &m)
Definition 3d.cpp:26
VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE auto & d
Definition main.cc:19
struct MyStruct s

◆ BLEND_MODE() [3/23]

SK_OPTS_NS::BLEND_MODE ( colordodge  )

Definition at line 2213 of file SkRasterPipeline_opts.h.

2213 {
2214 return if_then_else(d == 0, /* d + */ s*inv(da),
2215 if_then_else(s == sa, s + d*inv(sa),
2216 sa*min(da, (d*sa)*rcp_fast(sa - s)) + s*inv(da) + d*inv(sa)));
2217}

◆ BLEND_MODE() [4/23]

SK_OPTS_NS::BLEND_MODE ( darken  )

Definition at line 2203 of file SkRasterPipeline_opts.h.

2203{ return s + d - max(s*da, d*sa) ; }

◆ BLEND_MODE() [5/23]

SK_OPTS_NS::BLEND_MODE ( difference  )

Definition at line 2205 of file SkRasterPipeline_opts.h.

2205{ return s + d - two(min(s*da, d*sa)); }

◆ BLEND_MODE() [6/23]

SK_OPTS_NS::BLEND_MODE ( dstatop  )

Definition at line 2177 of file SkRasterPipeline_opts.h.

2177{ return d*sa + s*inv(da); }

◆ BLEND_MODE() [7/23]

SK_OPTS_NS::BLEND_MODE ( dstin  )

Definition at line 2179 of file SkRasterPipeline_opts.h.

2179{ return d * sa; }

◆ BLEND_MODE() [8/23]

SK_OPTS_NS::BLEND_MODE ( dstout  )

Definition at line 2181 of file SkRasterPipeline_opts.h.

2181{ return d * inv(sa); }

◆ BLEND_MODE() [9/23]

SK_OPTS_NS::BLEND_MODE ( dstover  )

Definition at line 2183 of file SkRasterPipeline_opts.h.

2183{ return mad(s, inv(da), d); }

◆ BLEND_MODE() [10/23]

SK_OPTS_NS::BLEND_MODE ( exclusion  )

Definition at line 2206 of file SkRasterPipeline_opts.h.

2206{ return s + d - two(s*d); }

◆ BLEND_MODE() [11/23]

SK_OPTS_NS::BLEND_MODE ( hardlight  )

Definition at line 2218 of file SkRasterPipeline_opts.h.

2218 {
2219 return s*inv(da) + d*inv(sa)
2220 + if_then_else(two(s) <= sa, two(s*d), sa*da - two((da-d)*(sa-s)));
2221}

◆ BLEND_MODE() [12/23]

SK_OPTS_NS::BLEND_MODE ( lighten  )

Definition at line 2204 of file SkRasterPipeline_opts.h.

2204{ return s + d - min(s*da, d*sa) ; }

◆ BLEND_MODE() [13/23]

SK_OPTS_NS::BLEND_MODE ( modulate  )

Definition at line 2185 of file SkRasterPipeline_opts.h.

2185{ return s*d; }

◆ BLEND_MODE() [14/23]

SK_OPTS_NS::BLEND_MODE ( multiply  )

Definition at line 2186 of file SkRasterPipeline_opts.h.

2186{ return s*inv(da) + d*inv(sa) + s*d; }

◆ BLEND_MODE() [15/23]

SK_OPTS_NS::BLEND_MODE ( overlay  )

Definition at line 2222 of file SkRasterPipeline_opts.h.

2222 {
2223 return s*inv(da) + d*inv(sa)
2224 + if_then_else(two(d) <= da, two(s*d), sa*da - two((da-d)*(sa-s)));
2225}

◆ BLEND_MODE() [16/23]

SK_OPTS_NS::BLEND_MODE ( plus_  )

Definition at line 2187 of file SkRasterPipeline_opts.h.

2187{ return min(s + d, 1.0f); } // We can clamp to either 1 or sa.

◆ BLEND_MODE() [17/23]

SK_OPTS_NS::BLEND_MODE ( screen  )

Definition at line 2188 of file SkRasterPipeline_opts.h.

2188{ return s + d - s*d; }

◆ BLEND_MODE() [18/23]

SK_OPTS_NS::BLEND_MODE ( softlight  )

Definition at line 2227 of file SkRasterPipeline_opts.h.

2227 {
2228 F m = if_then_else(da > 0, d / da, 0.0f),
2229 s2 = two(s),
2230 m4 = two(two(m));
2231
2232 // The logic forks three ways:
2233 // 1. dark src?
2234 // 2. light src, dark dst?
2235 // 3. light src, light dst?
2236 F darkSrc = d*(sa + (s2 - sa)*(1.0f - m)), // Used in case 1.
2237 darkDst = (m4*m4 + m4)*(m - 1.0f) + 7.0f*m, // Used in case 2.
2238 liteDst = sqrt_(m) - m,
2239 liteSrc = d*sa + da*(s2 - sa) * if_then_else(two(two(d)) <= da, darkDst, liteDst); // 2 or 3?
2240 return s*inv(da) + d*inv(sa) + if_then_else(s2 <= sa, darkSrc, liteSrc); // 1 or (2 or 3)?
2241}

◆ BLEND_MODE() [19/23]

SK_OPTS_NS::BLEND_MODE ( srcatop  )

Definition at line 2176 of file SkRasterPipeline_opts.h.

2176{ return s*da + d*inv(sa); }

◆ BLEND_MODE() [20/23]

SK_OPTS_NS::BLEND_MODE ( srcin  )

Definition at line 2178 of file SkRasterPipeline_opts.h.

2178{ return s * da; }

◆ BLEND_MODE() [21/23]

SK_OPTS_NS::BLEND_MODE ( srcout  )

Definition at line 2180 of file SkRasterPipeline_opts.h.

2180{ return s * inv(da); }

◆ BLEND_MODE() [22/23]

SK_OPTS_NS::BLEND_MODE ( srcover  )

Definition at line 2182 of file SkRasterPipeline_opts.h.

2182{ return mad(d, inv(sa), s); }

◆ BLEND_MODE() [23/23]

SK_OPTS_NS::BLEND_MODE ( xor_  )

Definition at line 2189 of file SkRasterPipeline_opts.h.

2189{ return s*inv(da) + d*inv(sa); }

◆ blit_mask_d32_a8()

void SK_OPTS_NS::blit_mask_d32_a8 ( SkPMColor *  dst,
size_t  dstRB,
const SkAlpha *  mask,
size_t  maskRB,
SkColor  color,
int  w,
int  h 
)
inline

Definition at line 400 of file SkBlitMask_opts.h.

402 {
403 if (color == SK_ColorBLACK) {
404 blit_mask_d32_a8_black(dst, dstRB, mask, maskRB, w, h);
405 } else if (SkColorGetA(color) == 0xFF) {
406 blit_mask_d32_a8_opaque(dst, dstRB, mask, maskRB, color, w, h);
407 } else {
408 blit_mask_d32_a8_general(dst, dstRB, mask, maskRB, color, w, h);
409 }
410}
SkColor4f color
constexpr SkColor SK_ColorBLACK
Definition SkColor.h:103
#define SkColorGetA(color)
Definition SkColor.h:61
static void blit_mask_d32_a8_general(SkPMColor *dst, size_t dstRB, const SkAlpha *mask, size_t maskRB, SkColor color, int w, int h)
static void blit_mask_d32_a8_black(SkPMColor *dst, size_t dstRB, const SkAlpha *maskPtr, size_t maskRB, int width, int height)
SkScalar w
SkScalar h

◆ blit_mask_d32_a8_black()

static void SK_OPTS_NS::blit_mask_d32_a8_black ( SkPMColor *  dst,
size_t  dstRB,
const SkAlpha *  maskPtr,
size_t  maskRB,
int  width,
int  height 
)
static

Definition at line 275 of file SkBlitMask_opts.h.

277 {
279 const uint8_t* SK_RESTRICT mask = (const uint8_t*)maskPtr;
280
281 // Nine patch may set maskRB to 0 to blit the same row repeatedly.
282 ptrdiff_t mask_adjust = (ptrdiff_t)maskRB - width;
283 dstRB -= (width << 2);
284 const __m128i zeros = __lsx_vldi(0);
285 __m128i planar = __lsx_vldi(0);
286 planar = __lsx_vinsgr2vr_d(planar, 0x0d0905010c080400, 0);
287 planar = __lsx_vinsgr2vr_d(planar, 0x0f0b07030e0a0602, 1);
288
289 do {
290 int w = width;
291 while (w >= 8) {
292 __m128i vmask = __lsx_vld(mask, 0);
293 vmask = __lsx_vilvl_b(zeros, vmask);
294 __m128i vscale = __lsx_vsub_h(__lsx_vreplgr2vr_h(256), vmask);
295 __m128i lo = __lsx_vld(device, 0); // bgra bgra bgra bgra
296 __m128i hi = __lsx_vld(device, 16); // BGRA BGRA BGRA BGRA
297 lo = __lsx_vshuf_b(zeros, lo, planar); // bbbb gggg rrrr aaaa
298 hi = __lsx_vshuf_b(zeros, hi, planar); // BBBB GGGG RRRR AAAA
299 __m128i bg = __lsx_vilvl_w(hi, lo), // bbbb BBBB gggg GGGG
300 ra = __lsx_vilvh_w(hi, lo); // rrrr RRRR aaaa AAAA
301
302 __m128i b = __lsx_vilvl_b(zeros, bg), // _b_b _b_b _B_B _B_B
303 g = __lsx_vilvh_b(zeros, bg), // _g_g _g_g _G_G _G_G
304 r = __lsx_vilvl_b(zeros, ra), // _r_r _r_r _R_R _R_R
305 a = __lsx_vilvh_b(zeros, ra); // _a_a _a_a _A_A _A_A
306
307 b = SkAlphaMul_lsx(b, vscale);
308 g = SkAlphaMul_lsx(g, vscale);
309 r = SkAlphaMul_lsx(r, vscale);
310 a = SkAlphaMul_lsx(a, vscale);
311
312 a += vmask;
313
314 bg = __lsx_vor_v(b, __lsx_vslli_h(g, 8)); // bgbg bgbg BGBG BGBG
315 ra = __lsx_vor_v(r, __lsx_vslli_h(a, 8)); // rara rara RARA RARA
316 lo = __lsx_vilvl_h(ra, bg); // bgra bgra bgra bgra
317 hi = __lsx_vilvh_h(ra, bg); // BGRA BGRA BGRA BGRA
318
319 __lsx_vst(lo, device, 0);
320 __lsx_vst(hi, device, 16);
321
322 mask += 8;
323 device += 8;
324 w -= 8;
325 }
326
327 while (w-- > 0) {
328 unsigned aa = *mask++;
329 *device = (aa << SK_A32_SHIFT)
330 + SkAlphaMulQ(*device, SkAlpha255To256(255 - aa));
331 device += 1;
332 }
333
334 device = (uint32_t*)((char*)device + dstRB);
335 mask += mask_adjust;
336
337 } while (--height != 0);
338 }
static SK_ALWAYS_INLINE uint32_t SkAlphaMulQ(uint32_t c, unsigned scale)
static unsigned SkAlpha255To256(U8CPU alpha)
Definition SkColorPriv.h:24
uint32_t SkPMColor
Definition SkColor.h:205
#define SK_RESTRICT
Definition SkFeatures.h:42
#define SK_A32_SHIFT
Definition SkTypes.h:54
VkDevice device
Definition main.cc:53
static bool b
static __m128i SkAlphaMul_lsx(__m128i x, __m128i y)
int32_t height
int32_t width

◆ blit_mask_d32_a8_general()

static void SK_OPTS_NS::blit_mask_d32_a8_general ( SkPMColor *  dst,
size_t  dstRB,
const SkAlpha *  mask,
size_t  maskRB,
SkColor  color,
int  w,
int  h 
)
static

Definition at line 262 of file SkBlitMask_opts.h.

264 {
265 D32_A8_Opaque_Color_lsx<true>(dst, dstRB, mask, maskRB, color, w, h);
266 }

◆ blit_mask_d32_a8_opaque()

static void SK_OPTS_NS::blit_mask_d32_a8_opaque ( SkPMColor *  dst,
size_t  dstRB,
const SkAlpha *  mask,
size_t  maskRB,
SkColor  color,
int  w,
int  h 
)
static

Definition at line 268 of file SkBlitMask_opts.h.

270 {
271 D32_A8_Opaque_Color_lsx<false>(dst, dstRB, mask, maskRB, color, w, h);
272 }

◆ blit_row_color32()

void SK_OPTS_NS::blit_row_color32 ( SkPMColor *  dst,
int  count,
SkPMColor  color 
)
inline

Definition at line 243 of file SkBlitRow_opts.h.

243 {
244 constexpr int N = 4; // 8, 16 also reasonable choices
248
249 auto kernel = [color](U32 src) {
250 unsigned invA = 255 - SkGetPackedA32(color);
251 invA += invA >> 7;
252 SkASSERT(0 < invA && invA < 256); // We handle alpha == 0 or alpha == 255 specially.
253
254 // (src * invA + (color << 8) + 128) >> 8
255 // Should all fit in 16 bits.
256 U8 s = sk_bit_cast<U8>(src),
257 a = U8(invA);
258 U16 c = skvx::cast<uint16_t>(sk_bit_cast<U8>(U32(color))),
259 d = (mull(s,a) + (c << 8) + 128)>>8;
260 return sk_bit_cast<U32>(skvx::cast<uint8_t>(d));
261 };
262
263 while (count >= N) {
264 kernel(U32::Load(dst)).store(dst);
265 dst += N;
266 count -= N;
267 }
268 while (count --> 0) {
269 *dst = kernel(U32{*dst})[0];
270 dst++;
271 }
272}
#define SkASSERT(cond)
Definition SkAssert.h:116
#define SkGetPackedA32(packed)
Definition SkColorPriv.h:92
V< uint8_t > U8
V< uint32_t > U32
dst
Definition cp.py:12

◆ blit_row_s32a_opaque()

void SK_OPTS_NS::blit_row_s32a_opaque ( SkPMColor *  dst,
const SkPMColor *  src,
int  len,
U8CPU  alpha 
)
inline

Definition at line 164 of file SkBlitRow_opts.h.

164 {
165 SkASSERT(alpha == 0xFF);
166 sk_msan_assert_initialized(src, src+len);
167
168#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
169 while (len >= 8) {
170 _mm256_storeu_si256((__m256i*)dst,
171 SkPMSrcOver_AVX2(_mm256_loadu_si256((const __m256i*)src),
172 _mm256_loadu_si256((const __m256i*)dst)));
173 src += 8;
174 dst += 8;
175 len -= 8;
176 }
177#endif
178
179#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
180 while (len >= 4) {
181 _mm_storeu_si128((__m128i*)dst, SkPMSrcOver_SSE2(_mm_loadu_si128((const __m128i*)src),
182 _mm_loadu_si128((const __m128i*)dst)));
183 src += 4;
184 dst += 4;
185 len -= 4;
186 }
187#endif
188
189#if defined(SK_ARM_HAS_NEON)
190 while (len >= 8) {
191 vst4_u8((uint8_t*)dst, SkPMSrcOver_neon8(vld4_u8((const uint8_t*)dst),
192 vld4_u8((const uint8_t*)src)));
193 src += 8;
194 dst += 8;
195 len -= 8;
196 }
197
198 while (len >= 2) {
199 vst1_u8((uint8_t*)dst, SkPMSrcOver_neon2(vld1_u8((const uint8_t*)dst),
200 vld1_u8((const uint8_t*)src)));
201 src += 2;
202 dst += 2;
203 len -= 2;
204 }
205
206 if (len != 0) {
207 uint8x8_t result = SkPMSrcOver_neon2(vcreate_u8((uint64_t)*dst),
208 vcreate_u8((uint64_t)*src));
209 vst1_lane_u32(dst, vreinterpret_u32_u8(result), 0);
210 }
211 return;
212#endif
213
214#if SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LASX
215 while (len >= 8) {
216 __lasx_xvst(SkPMSrcOver_LASX(__lasx_xvld(src, 0),
217 __lasx_xvld(dst, 0)), (__m256i*)dst, 0);
218 src += 8;
219 dst += 8;
220 len -= 8;
221 }
222#endif
223
224#if SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LSX
225 while (len >= 4) {
226 __lsx_vst(SkPMSrcOver_LSX(__lsx_vld(src, 0),
227 __lsx_vld(dst, 0)), (__m128i*)dst, 0);
228 src += 4;
229 dst += 4;
230 len -= 4;
231 }
232#endif
233
234 while (len --> 0) {
235 *dst = SkPMSrcOver(*src, *dst);
236 src++;
237 dst++;
238 }
239}
static __m128i SkPMSrcOver_LSX(const __m128i &src, const __m128i &dst)
static __m256i SkPMSrcOver_LASX(const __m256i &src, const __m256i &dst)
static __m128i SkPMSrcOver_SSE2(const __m128i &src, const __m128i &dst)
static __m256i SkPMSrcOver_AVX2(const __m256i &src, const __m256i &dst)
static SkPMColor SkPMSrcOver(SkPMColor src, SkPMColor dst)
static void sk_msan_assert_initialized(const void *begin, const void *end)
Definition SkMSAN.h:24
GAsyncResult * result

◆ cast()

SI F SK_OPTS_NS::cast ( U32  v)

Definition at line 1337 of file SkRasterPipeline_opts.h.

1337{ return (F)v; }

◆ cast64()

SI F SK_OPTS_NS::cast64 ( U64  v)

Definition at line 1338 of file SkRasterPipeline_opts.h.

1338{ return (F)v; }

◆ cast_to_float_from_fn()

template<typename T >
SI void SK_OPTS_NS::cast_to_float_from_fn ( T *  dst)

Definition at line 4377 of file SkRasterPipeline_opts.h.

4377 {
4378 *dst = sk_bit_cast<T>((F)*dst);
4379}

◆ cast_to_int_from_fn()

SI void SK_OPTS_NS::cast_to_int_from_fn ( F *  dst)

Definition at line 4380 of file SkRasterPipeline_opts.h.

4380 {
4381 *dst = sk_bit_cast<F>((I32)*dst);
4382}

◆ cast_to_uint_from_fn()

SI void SK_OPTS_NS::cast_to_uint_from_fn ( F *  dst)

Definition at line 4383 of file SkRasterPipeline_opts.h.

4383 {
4384 *dst = sk_bit_cast<F>((U32)*dst);
4385}

◆ ceil_()

SI F SK_OPTS_NS::ceil_ ( F  v)

Definition at line 153 of file SkRasterPipeline_opts.h.

153{ return ceilf(v); }

◆ ceil_fn()

SI void SK_OPTS_NS::ceil_fn ( F *  dst)

Definition at line 4407 of file SkRasterPipeline_opts.h.

4407 {
4408 *dst = ceil_(*dst);
4409}

◆ clamp()

SI F SK_OPTS_NS::clamp ( F  v,
F  limit 
)

Definition at line 1825 of file SkRasterPipeline_opts.h.

1825 {
1826 F inclusive = sk_bit_cast<F>(sk_bit_cast<U32>(limit) - 1); // Exclusive -> inclusive.
1827 return min(max(0.0f, v), inclusive);
1828}

◆ clamp_01_()

SI F SK_OPTS_NS::clamp_01_ ( F  v)

Definition at line 2372 of file SkRasterPipeline_opts.h.

2372{ return min(max(0.0f, v), 1.0f); }

◆ clamp_ex()

SI F SK_OPTS_NS::clamp_ex ( F  v,
float  limit 
)

Definition at line 1831 of file SkRasterPipeline_opts.h.

1831 {
1832 const F inclusiveZ = F_(std::numeric_limits<float>::min()),
1833 inclusiveL = sk_bit_cast<F>( sk_bit_cast<U32>(F_(limit)) - 1 );
1834 return min(max(inclusiveZ, v), inclusiveL);
1835}

◆ clip_channel()

SI F SK_OPTS_NS::clip_channel ( F  c,
F  l,
I32  clip_low,
I32  clip_high,
F  mn_scale,
F  mx_scale 
)

Definition at line 2272 of file SkRasterPipeline_opts.h.

2272 {
2273 c = if_then_else(clip_low, mad(mn_scale, c - l, l), c);
2274 c = if_then_else(clip_high, mad(mx_scale, c - l, l), c);
2275 c = max(c, 0.0f); // Sometimes without this we may dip just a little negative.
2276 return c;
2277}

◆ clip_color()

SI void SK_OPTS_NS::clip_color ( F *  r,
F *  g,
F *  b,
F  a 
)

Definition at line 2278 of file SkRasterPipeline_opts.h.

2278 {
2279 F mn = min(*r, min(*g, *b)),
2280 mx = max(*r, max(*g, *b)),
2281 l = lum(*r, *g, *b),
2282 mn_scale = ( l) * rcp_fast(l - mn),
2283 mx_scale = (a - l) * rcp_fast(mx - l);
2284 I32 clip_low = cond_to_mask(mn < 0 && l != mn),
2285 clip_high = cond_to_mask(mx > a && l != mx);
2286
2287 *r = clip_channel(*r, l, clip_low, clip_high, mn_scale, mx_scale);
2288 *g = clip_channel(*g, l, clip_low, clip_high, mn_scale, mx_scale);
2289 *b = clip_channel(*b, l, clip_low, clip_high, mn_scale, mx_scale);
2290}
static float lum(float r, float g, float b)
Definition hsl.cpp:52
SI F clip_channel(F c, F l, I32 clip_low, I32 clip_high, F mn_scale, F mx_scale)
SI I32 cond_to_mask(I32 cond)

◆ cmpeq_fn()

template<typename T >
SI void SK_OPTS_NS::cmpeq_fn ( T *  dst,
T *  src 
)

Definition at line 4631 of file SkRasterPipeline_opts.h.

4631 {
4632 static_assert(sizeof(T) == sizeof(I32));
4633 I32 result = cond_to_mask(*dst == *src);
4634 memcpy(dst, &result, sizeof(I32));
4635}

◆ cmple_fn()

template<typename T >
SI void SK_OPTS_NS::cmple_fn ( T *  dst,
T *  src 
)

Definition at line 4624 of file SkRasterPipeline_opts.h.

4624 {
4625 static_assert(sizeof(T) == sizeof(I32));
4626 I32 result = cond_to_mask(*dst <= *src);
4627 memcpy(dst, &result, sizeof(I32));
4628}

◆ cmplt_fn()

template<typename T >
SI void SK_OPTS_NS::cmplt_fn ( T *  dst,
T *  src 
)

Definition at line 4617 of file SkRasterPipeline_opts.h.

4617 {
4618 static_assert(sizeof(T) == sizeof(I32));
4619 I32 result = cond_to_mask(*dst < *src);
4620 memcpy(dst, &result, sizeof(I32));
4621}

◆ cmpne_fn()

template<typename T >
SI void SK_OPTS_NS::cmpne_fn ( T *  dst,
T *  src 
)

Definition at line 4638 of file SkRasterPipeline_opts.h.

4638 {
4639 static_assert(sizeof(T) == sizeof(I32));
4640 I32 result = cond_to_mask(*dst != *src);
4641 memcpy(dst, &result, sizeof(I32));
4642}

◆ compute_perlin_vector()

SI F SK_OPTS_NS::compute_perlin_vector ( U32  sample,
F  x,
F  y 
)

Definition at line 3692 of file SkRasterPipeline_opts.h.

3692 {
3693 // We're relying on the packing of uint16s within a uint32, which will vary based on endianness.
3694#ifdef SK_CPU_BENDIAN
3695 U32 sampleLo = sample >> 16;
3696 U32 sampleHi = sample & 0xFFFF;
3697#else
3698 U32 sampleLo = sample & 0xFFFF;
3699 U32 sampleHi = sample >> 16;
3700#endif
3701
3702 // Convert 32-bit sample value into two floats in the [-1..1] range.
3703 F vecX = mad(cast(sampleLo), 2.0f / 65535.0f, -1.0f);
3704 F vecY = mad(cast(sampleHi), 2.0f / 65535.0f, -1.0f);
3705
3706 // Return the dot of the sample and the passed-in vector.
3707 return mad(vecX, x,
3708 vecY * y);
3709}

◆ cond_to_mask()

SI I32 SK_OPTS_NS::cond_to_mask ( I32  cond)

Definition at line 2010 of file SkRasterPipeline_opts.h.

2010 {
2011#if defined(JUMPER_IS_SCALAR)
2012 // In scalar mode, conditions are bools (0 or 1), but we want to store and operate on masks
2013 // (eg, using bitwise operations to select values).
2014 return if_then_else(cond, I32(~0), I32(0));
2015#else
2016 // In SIMD mode, our various instruction sets already represent conditions as masks.
2017 return cond;
2018#endif
2019}
V< int32_t > I32

◆ copy_n_immutable_unmasked_fn()

template<int NumSlots>
SI void SK_OPTS_NS::copy_n_immutable_unmasked_fn ( SkRasterPipeline_BinaryOpCtx *  packed,
std::byte *  base 
)

Definition at line 4140 of file SkRasterPipeline_opts.h.

4140 {
4141 auto ctx = SkRPCtxUtils::Unpack(packed);
4142
4143 // Load the scalar values.
4144 float* src = (float*)(base + ctx.src);
4145 float values[NumSlots];
4146 SK_UNROLL for (int index = 0; index < NumSlots; ++index) {
4147 values[index] = src[index];
4148 }
4149 // Broadcast the scalars into the destination.
4150 F* dst = (F*)(base + ctx.dst);
4151 SK_UNROLL for (int index = 0; index < NumSlots; ++index) {
4152 dst[index] = F_(values[index]);
4153 }
4154}

◆ copy_n_slots_masked_fn()

template<int NumSlots>
SI void SK_OPTS_NS::copy_n_slots_masked_fn ( SkRasterPipeline_BinaryOpCtx *  packed,
std::byte *  base,
I32  mask 
)

Definition at line 4170 of file SkRasterPipeline_opts.h.

4170 {
4171 auto ctx = SkRPCtxUtils::Unpack(packed);
4172 I32* dst = (I32*)(base + ctx.dst);
4173 I32* src = (I32*)(base + ctx.src);
4174 SK_UNROLL for (int count = 0; count < NumSlots; ++count) {
4175 *dst = if_then_else(mask, *src, *dst);
4176 dst += 1;
4177 src += 1;
4178 }
4179}

◆ copy_n_slots_unmasked_fn()

template<int NumSlots>
SI void SK_OPTS_NS::copy_n_slots_unmasked_fn ( SkRasterPipeline_BinaryOpCtx *  packed,
std::byte *  base 
)

Definition at line 4119 of file SkRasterPipeline_opts.h.

4119 {
4120 auto ctx = SkRPCtxUtils::Unpack(packed);
4121 F* dst = (F*)(base + ctx.dst);
4122 F* src = (F*)(base + ctx.src);
4123 memcpy(dst, src, sizeof(F) * NumSlots);
4124}

◆ cos_()

SI F SK_OPTS_NS::cos_ ( F  x)

Definition at line 1857 of file SkRasterPipeline_opts.h.

1857 {
1858 constexpr float one_over_pi2 = 1 / (2 * SK_FloatPI);
1859 x *= one_over_pi2;
1860 x = 0.25f - abs_(x - floor_(x + 0.5f));
1861 return sin5q_(x);
1862}
SI F floor_(F x)

◆ css_hsl_to_srgb_()

SI RGB SK_OPTS_NS::css_hsl_to_srgb_ ( F  h,
F  s,
F  l 
)

Definition at line 2571 of file SkRasterPipeline_opts.h.

2571 {
2572 h = mod_(h, 360);
2573
2574 s *= 0.01f;
2575 l *= 0.01f;
2576
2577 F k[3] = {
2578 mod_(0 + h * (1 / 30.0f), 12),
2579 mod_(8 + h * (1 / 30.0f), 12),
2580 mod_(4 + h * (1 / 30.0f), 12)
2581 };
2582 F a = s * min(l, 1 - l);
2583 return {
2584 l - a * max(-1.0f, min(min(k[0] - 3.0f, 9.0f - k[0]), 1.0f)),
2585 l - a * max(-1.0f, min(min(k[1] - 3.0f, 9.0f - k[1]), 1.0f)),
2586 l - a * max(-1.0f, min(min(k[2] - 3.0f, 9.0f - k[2]), 1.0f))
2587 };
2588}
SI F mod_(F x, float y)

◆ D32_A8_Opaque_Color_lsx()

template<bool isColor>
static void SK_OPTS_NS::D32_A8_Opaque_Color_lsx ( void *SK_RESTRICT  dst,
size_t  dstRB,
const void *SK_RESTRICT  maskPtr,
size_t  maskRB,
SkColor  color,
int  width,
int  height 
)
static

Definition at line 174 of file SkBlitMask_opts.h.

176 {
179 const uint8_t* SK_RESTRICT mask = (const uint8_t*)maskPtr;
180 __m128i vpmc_b = __lsx_vldi(0);
181 __m128i vpmc_g = __lsx_vldi(0);
182 __m128i vpmc_r = __lsx_vldi(0);
183 __m128i vpmc_a = __lsx_vldi(0);
184
185 // Nine patch may set maskRB to 0 to blit the same row repeatedly.
186 ptrdiff_t mask_adjust = (ptrdiff_t)maskRB - width;
187 dstRB -= (width << 2);
188
189 if (width >= 8) {
190 vpmc_b = __lsx_vreplgr2vr_h(SkGetPackedB32(pmc));
191 vpmc_g = __lsx_vreplgr2vr_h(SkGetPackedG32(pmc));
192 vpmc_r = __lsx_vreplgr2vr_h(SkGetPackedR32(pmc));
193 vpmc_a = __lsx_vreplgr2vr_h(SkGetPackedA32(pmc));
194 }
195
196 const __m128i zeros = __lsx_vldi(0);
197 __m128i planar = __lsx_vldi(0);
198 planar = __lsx_vinsgr2vr_d(planar, 0x0d0905010c080400, 0);
199 planar = __lsx_vinsgr2vr_d(planar, 0x0f0b07030e0a0602, 1);
200
201 do{
202 int w = width;
203 while(w >= 8){
204 __m128i lo = __lsx_vld(device, 0); // bgra bgra bgra bgra
205 __m128i hi = __lsx_vld(device, 16); // BGRA BGRA BGRA BGRA
206 lo = __lsx_vshuf_b(zeros, lo, planar); // bbbb gggg rrrr aaaa
207 hi = __lsx_vshuf_b(zeros, hi, planar); // BBBB GGGG RRRR AAAA
208 __m128i bg = __lsx_vilvl_w(hi, lo), // bbbb BBBB gggg GGGG
209 ra = __lsx_vilvh_w(hi, lo); // rrrr RRRR aaaa AAAA
210
211 __m128i b = __lsx_vilvl_b(zeros, bg), // _b_b _b_b _B_B _B_B
212 g = __lsx_vilvh_b(zeros, bg), // _g_g _g_g _G_G _G_G
213 r = __lsx_vilvl_b(zeros, ra), // _r_r _r_r _R_R _R_R
214 a = __lsx_vilvh_b(zeros, ra); // _a_a _a_a _A_A _A_A
215
216 __m128i vmask = __lsx_vld(mask, 0);
217 vmask = __lsx_vilvl_b(zeros, vmask);
218 __m128i vscale, vmask256 = __lsx_vadd_h(vmask, __lsx_vreplgr2vr_h(1));
219
220 if (isColor) {
221 __m128i tmp = SkAlphaMul_lsx(vpmc_a, vmask256);
222 vscale = __lsx_vsub_h(__lsx_vreplgr2vr_h(256), tmp);
223 } else {
224 vscale = __lsx_vsub_h(__lsx_vreplgr2vr_h(256), vmask);
225 }
226
227 b = SkAlphaMul_lsx(vpmc_b, vmask256) + SkAlphaMul_lsx(b, vscale);
228 g = SkAlphaMul_lsx(vpmc_g, vmask256) + SkAlphaMul_lsx(g, vscale);
229 r = SkAlphaMul_lsx(vpmc_r, vmask256) + SkAlphaMul_lsx(r, vscale);
230 a = SkAlphaMul_lsx(vpmc_a, vmask256) + SkAlphaMul_lsx(a, vscale);
231
232 bg = __lsx_vor_v(b, __lsx_vslli_h(g, 8)); // bgbg bgbg BGBG BGBG
233 ra = __lsx_vor_v(r, __lsx_vslli_h(a, 8)); // rara rara RARA RARA
234 lo = __lsx_vilvl_h(ra, bg); // bgra bgra bgra bgra
235 hi = __lsx_vilvh_h(ra, bg); // BGRA BGRA BGRA BGRA
236
237 __lsx_vst(lo, device, 0);
238 __lsx_vst(hi, device, 16);
239
240 mask += 8;
241 device += 8;
242 w -= 8;
243 }
244
245 while (w--) {
246 unsigned aa = *mask++;
247 if (isColor) {
248 *device = SkBlendARGB32(pmc, *device, aa);
249 } else {
251 + SkAlphaMulQ(*device, SkAlpha255To256(255 - aa));
252 }
253 device += 1;
254 }
255
256 device = (uint32_t *)((char*)device + dstRB);
257 mask += mask_adjust;
258
259 } while (--height != 0);
260 }
static SkPMColor SkBlendARGB32(SkPMColor src, SkPMColor dst, U8CPU aa)
#define SkGetPackedB32(packed)
Definition SkColorPriv.h:95
#define SkGetPackedR32(packed)
Definition SkColorPriv.h:93
#define SkGetPackedG32(packed)
Definition SkColorPriv.h:94
SK_API SkPMColor SkPreMultiplyColor(SkColor c)
Definition SkColor.cpp:21

◆ DECLARE_BINARY_FLOAT()

SK_OPTS_NS::DECLARE_BINARY_FLOAT ( add  )

◆ DECLARE_UNARY_INT()

SK_OPTS_NS::DECLARE_UNARY_INT ( cast_to_float_from  )

◆ decode_packed_coordinates_and_weight()

template<typename U32 , typename Out >
static void SK_OPTS_NS::decode_packed_coordinates_and_weight ( U32  packed,
Out *  v0,
Out *  v1,
Out *  w 
)
static

Definition at line 38 of file SkBitmapProcState_opts.h.

38 {
39 *v0 = (packed >> 18); // Integer coordinate x0 or y0.
40 *v1 = (packed & 0x3fff); // Integer coordinate x1 or y1.
41 *w = (packed >> 14) & 0xf; // Lerp weight for v1; weight for v0 is 16-w.
42}

◆ div_fn()

template<typename T >
SI void SK_OPTS_NS::div_fn ( T dst,
T src 
)

Definition at line 4585 of file SkRasterPipeline_opts.h.

4585 {
4586 T divisor = *src;
4587 if constexpr (!std::is_same_v<T, F>) {
4588 // We will crash if we integer-divide against zero. Convert 0 to ~0 to avoid this.
4589 divisor |= (T)cond_to_mask(divisor == 0);
4590 }
4591 *dst /= divisor;
4592}

◆ exclusive_mirror()

SI F SK_OPTS_NS::exclusive_mirror ( F  v,
const SkRasterPipeline_TileCtx ctx 
)

Definition at line 3239 of file SkRasterPipeline_opts.h.

3239 {
3240 auto limit = ctx->scale;
3241 auto invLimit = ctx->invScale;
3242
3243 // This is "repeat" over the range 0..2*limit
3244 auto u = v - floor_(v*invLimit*0.5f)*2*limit;
3245 // s will be 0 when moving forward (e.g. [0, limit)) and 1 when moving backward (e.g.
3246 // [limit, 2*limit)).
3247 auto s = floor_(u*invLimit);
3248 // This is the mirror result.
3249 auto m = u - 2*s*(u - limit);
3250 // Apply a bias to m if moving backwards so that we snap consistently at exact integer coords in
3251 // the logical infinite image. This is tested by mirror_tile GM. Note that all values
3252 // that have a non-zero bias applied are > 0.
3253 auto biasInUlps = trunc_(s);
3254 return sk_bit_cast<F>(sk_bit_cast<U32>(m) + ctx->mirrorBiasDir*biasInUlps);
3255}

◆ exclusive_repeat()

SI F SK_OPTS_NS::exclusive_repeat ( F  v,
const SkRasterPipeline_TileCtx ctx 
)

Definition at line 3236 of file SkRasterPipeline_opts.h.

3236 {
3237 return v - floor_(v*ctx->invScale)*ctx->scale;
3238}

◆ expand() [1/2]

SI U32 SK_OPTS_NS::expand ( U16  v)

Definition at line 1340 of file SkRasterPipeline_opts.h.

1340{ return (U32)v; }

◆ expand() [2/2]

SI U32 SK_OPTS_NS::expand ( U8  v)

Definition at line 1341 of file SkRasterPipeline_opts.h.

1341{ return (U32)v; }

◆ F_()

SI constexpr F SK_OPTS_NS::F_ ( float  x)
constexpr

Definition at line 1299 of file SkRasterPipeline_opts.h.

1299{ return x; }

◆ floor_()

SI F SK_OPTS_NS::floor_ ( F  v)

Definition at line 152 of file SkRasterPipeline_opts.h.

152{ return floorf(v); }

◆ floor_fn()

SI void SK_OPTS_NS::floor_fn ( F dst)

Definition at line 4403 of file SkRasterPipeline_opts.h.

4403 {
4404 *dst = floor_(*dst);
4405}

◆ fract()

SI F SK_OPTS_NS::fract ( F  v)

Definition at line 1356 of file SkRasterPipeline_opts.h.

1356{ return v - floor_(v); }

◆ from_10101010_xr()

SI void SK_OPTS_NS::from_10101010_xr ( U64  _10x6,
F r,
F g,
F b,
F a 
)

Definition at line 1795 of file SkRasterPipeline_opts.h.

1795 {
1796 *r = (cast64((_10x6 >> 6) & 0x3ff) - 384.f) / 510.f;
1797 *g = (cast64((_10x6 >> 22) & 0x3ff) - 384.f) / 510.f;
1798 *b = (cast64((_10x6 >> 38) & 0x3ff) - 384.f) / 510.f;
1799 *a = (cast64((_10x6 >> 54) & 0x3ff) - 384.f) / 510.f;
1800}

◆ from_1010102()

SI void SK_OPTS_NS::from_1010102 ( U32  rgba,
F r,
F g,
F b,
F a 
)

Definition at line 1780 of file SkRasterPipeline_opts.h.

1780 {
1781 *r = cast((rgba ) & 0x3ff) * (1/1023.0f);
1782 *g = cast((rgba >> 10) & 0x3ff) * (1/1023.0f);
1783 *b = cast((rgba >> 20) & 0x3ff) * (1/1023.0f);
1784 *a = cast((rgba >> 30) ) * (1/ 3.0f);
1785}
static const uint32_t rgba[kNumPixels]

◆ from_1010102_xr()

SI void SK_OPTS_NS::from_1010102_xr ( U32  rgba,
F r,
F g,
F b,
F a 
)

Definition at line 1786 of file SkRasterPipeline_opts.h.

1786 {
1787 static constexpr float min = -0.752941f;
1788 static constexpr float max = 1.25098f;
1789 static constexpr float range = max - min;
1790 *r = cast((rgba ) & 0x3ff) * (1/1023.0f) * range + min;
1791 *g = cast((rgba >> 10) & 0x3ff) * (1/1023.0f) * range + min;
1792 *b = cast((rgba >> 20) & 0x3ff) * (1/1023.0f) * range + min;
1793 *a = cast((rgba >> 30) ) * (1/ 3.0f);
1794}

◆ from_10x6()

SI void SK_OPTS_NS::from_10x6 ( U64  _10x6,
F r,
F g,
F b,
F a 
)

Definition at line 1801 of file SkRasterPipeline_opts.h.

1801 {
1802 *r = cast64((_10x6 >> 6) & 0x3ff) * (1/1023.0f);
1803 *g = cast64((_10x6 >> 22) & 0x3ff) * (1/1023.0f);
1804 *b = cast64((_10x6 >> 38) & 0x3ff) * (1/1023.0f);
1805 *a = cast64((_10x6 >> 54) & 0x3ff) * (1/1023.0f);
1806}

◆ from_1616()

SI void SK_OPTS_NS::from_1616 ( U32  _1616,
F r,
F g 
)

Definition at line 1807 of file SkRasterPipeline_opts.h.

1807 {
1808 *r = cast((_1616 ) & 0xffff) * (1/65535.0f);
1809 *g = cast((_1616 >> 16) & 0xffff) * (1/65535.0f);
1810}

◆ from_16161616()

SI void SK_OPTS_NS::from_16161616 ( U64  _16161616,
F r,
F g,
F b,
F a 
)

Definition at line 1811 of file SkRasterPipeline_opts.h.

1811 {
1812 *r = cast64((_16161616 ) & 0xffff) * (1/65535.0f);
1813 *g = cast64((_16161616 >> 16) & 0xffff) * (1/65535.0f);
1814 *b = cast64((_16161616 >> 32) & 0xffff) * (1/65535.0f);
1815 *a = cast64((_16161616 >> 48) & 0xffff) * (1/65535.0f);
1816}

◆ from_4444()

SI void SK_OPTS_NS::from_4444 ( U16  _4444,
F r,
F g,
F b,
F a 
)

Definition at line 1762 of file SkRasterPipeline_opts.h.

1762 {
1763 U32 wide = expand(_4444);
1764 *r = cast(wide & (15<<12)) * (1.0f / (15<<12));
1765 *g = cast(wide & (15<< 8)) * (1.0f / (15<< 8));
1766 *b = cast(wide & (15<< 4)) * (1.0f / (15<< 4));
1767 *a = cast(wide & (15<< 0)) * (1.0f / (15<< 0));
1768}
SI U32 expand(U16 v)

◆ from_565()

SI void SK_OPTS_NS::from_565 ( U16  _565,
F r,
F g,
F b 
)

Definition at line 1756 of file SkRasterPipeline_opts.h.

1756 {
1757 U32 wide = expand(_565);
1758 *r = cast(wide & (31<<11)) * (1.0f / (31<<11));
1759 *g = cast(wide & (63<< 5)) * (1.0f / (63<< 5));
1760 *b = cast(wide & (31<< 0)) * (1.0f / (31<< 0));
1761}

◆ from_88()

SI void SK_OPTS_NS::from_88 ( U16  _88,
F r,
F g 
)

Definition at line 1775 of file SkRasterPipeline_opts.h.

1775 {
1776 U32 wide = expand(_88);
1777 *r = cast((wide ) & 0xff) * (1/255.0f);
1778 *g = cast((wide >> 8) & 0xff) * (1/255.0f);
1779}

◆ from_8888()

SI void SK_OPTS_NS::from_8888 ( U32  _8888,
F r,
F g,
F b,
F a 
)

Definition at line 1769 of file SkRasterPipeline_opts.h.

1769 {
1770 *r = cast((_8888 ) & 0xff) * (1/255.0f);
1771 *g = cast((_8888 >> 8) & 0xff) * (1/255.0f);
1772 *b = cast((_8888 >> 16) & 0xff) * (1/255.0f);
1773 *a = cast((_8888 >> 24) ) * (1/255.0f);
1774}

◆ from_byte()

SI F SK_OPTS_NS::from_byte ( U8  b)

Definition at line 1750 of file SkRasterPipeline_opts.h.

1750 {
1751 return cast(expand(b)) * (1/255.0f);
1752}

◆ from_half()

SI F SK_OPTS_NS::from_half ( U16  h)

Definition at line 1399 of file SkRasterPipeline_opts.h.

1399 {
1400#if defined(JUMPER_IS_NEON) && defined(SK_CPU_ARM64)
1401 return vcvt_f32_f16((float16x4_t)h);
1402
1403#elif defined(JUMPER_IS_SKX)
1404 return _mm512_cvtph_ps((__m256i)h);
1405
1406#elif defined(JUMPER_IS_HSW)
1407 return _mm256_cvtph_ps((__m128i)h);
1408
1409#else
1410 // Remember, a half is 1-5-10 (sign-exponent-mantissa) with 15 exponent bias.
1411 U32 sem = expand(h),
1412 s = sem & 0x8000,
1413 em = sem ^ s;
1414
1415 // Convert to 1-8-23 float with 127 bias, flushing denorm halfs (including zero) to zero.
1416 auto denorm = (I32)em < 0x0400; // I32 comparison is often quicker, and always safe here.
1417 return if_then_else(denorm, F0
1418 , sk_bit_cast<F>( (s<<16) + (em<<13) + ((127-15)<<23) ));
1419#endif
1420}

◆ from_short()

SI F SK_OPTS_NS::from_short ( U16  s)

Definition at line 1753 of file SkRasterPipeline_opts.h.

1753 {
1754 return cast(expand(s)) * (1/65535.0f);
1755}

◆ gather()

template<typename T >
SI T SK_OPTS_NS::gather ( const T p,
U32  ix 
)

Definition at line 172 of file SkRasterPipeline_opts.h.

172{ return p[ix]; }

◆ gradient_lookup()

SI void SK_OPTS_NS::gradient_lookup ( const SkRasterPipeline_GradientCtx c,
U32  idx,
F  t,
F r,
F g,
F b,
F a 
)

Definition at line 3391 of file SkRasterPipeline_opts.h.

3392 {
3393 F fr, br, fg, bg, fb, bb, fa, ba;
3394#if defined(JUMPER_IS_HSW)
3395 if (c->stopCount <=8) {
3396 fr = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[0]), (__m256i)idx);
3397 br = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[0]), (__m256i)idx);
3398 fg = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[1]), (__m256i)idx);
3399 bg = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[1]), (__m256i)idx);
3400 fb = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[2]), (__m256i)idx);
3401 bb = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[2]), (__m256i)idx);
3402 fa = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[3]), (__m256i)idx);
3403 ba = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[3]), (__m256i)idx);
3404 } else
3405#elif defined(JUMPER_IS_LASX)
3406 if (c->stopCount <= 8) {
3407 fr = (__m256)__lasx_xvperm_w(__lasx_xvld(c->fs[0], 0), idx);
3408 br = (__m256)__lasx_xvperm_w(__lasx_xvld(c->bs[0], 0), idx);
3409 fg = (__m256)__lasx_xvperm_w(__lasx_xvld(c->fs[1], 0), idx);
3410 bg = (__m256)__lasx_xvperm_w(__lasx_xvld(c->bs[1], 0), idx);
3411 fb = (__m256)__lasx_xvperm_w(__lasx_xvld(c->fs[2], 0), idx);
3412 bb = (__m256)__lasx_xvperm_w(__lasx_xvld(c->bs[2], 0), idx);
3413 fa = (__m256)__lasx_xvperm_w(__lasx_xvld(c->fs[3], 0), idx);
3414 ba = (__m256)__lasx_xvperm_w(__lasx_xvld(c->bs[3], 0), idx);
3415 } else
3416#elif defined(JUMPER_IS_LSX)
3417 if (c->stopCount <= 4) {
3418 __m128i zero = __lsx_vldi(0);
3419 fr = (__m128)__lsx_vshuf_w(idx, zero, __lsx_vld(c->fs[0], 0));
3420 br = (__m128)__lsx_vshuf_w(idx, zero, __lsx_vld(c->bs[0], 0));
3421 fg = (__m128)__lsx_vshuf_w(idx, zero, __lsx_vld(c->fs[1], 0));
3422 bg = (__m128)__lsx_vshuf_w(idx, zero, __lsx_vld(c->bs[1], 0));
3423 fb = (__m128)__lsx_vshuf_w(idx, zero, __lsx_vld(c->fs[2], 0));
3424 bb = (__m128)__lsx_vshuf_w(idx, zero, __lsx_vld(c->bs[2], 0));
3425 fa = (__m128)__lsx_vshuf_w(idx, zero, __lsx_vld(c->fs[3], 0));
3426 ba = (__m128)__lsx_vshuf_w(idx, zero, __lsx_vld(c->bs[3], 0));
3427 } else
3428#endif
3429 {
3430 fr = gather(c->fs[0], idx);
3431 br = gather(c->bs[0], idx);
3432 fg = gather(c->fs[1], idx);
3433 bg = gather(c->bs[1], idx);
3434 fb = gather(c->fs[2], idx);
3435 bb = gather(c->bs[2], idx);
3436 fa = gather(c->fs[3], idx);
3437 ba = gather(c->bs[3], idx);
3438 }
3439
3440 *r = mad(t, fr, br);
3441 *g = mad(t, fg, bg);
3442 *b = mad(t, fb, bb);
3443 *a = mad(t, fa, ba);
3444}
SI T gather(const T *p, U32 ix)

◆ I32_()

SI constexpr I32 SK_OPTS_NS::I32_ ( int32_t  x)
constexpr

Definition at line 1300 of file SkRasterPipeline_opts.h.

1300{ return x; }

◆ if_then_else() [1/2]

SI F SK_OPTS_NS::if_then_else ( I32  c,
F  t,
F  e 
)

Definition at line 165 of file SkRasterPipeline_opts.h.

165{ return c ? t : e; }

◆ if_then_else() [2/2]

SI I32 SK_OPTS_NS::if_then_else ( I32  c,
I32  t,
I32  e 
)

Definition at line 166 of file SkRasterPipeline_opts.h.

166{ return c ? t : e; }

◆ inv()

SI F SK_OPTS_NS::inv ( F  x)

Definition at line 2171 of file SkRasterPipeline_opts.h.

2171{ return 1.0f - x; }

◆ invsqrt_fn()

SI void SK_OPTS_NS::invsqrt_fn ( F dst)

Definition at line 4411 of file SkRasterPipeline_opts.h.

4411 {
4412 *dst = rsqrt(*dst);
4413}

◆ iround()

SI I32 SK_OPTS_NS::iround ( F  v)

Definition at line 159 of file SkRasterPipeline_opts.h.

159{ return (I32)(v + 0.5f); }

◆ ix_and_ptr()

template<typename T >
SI U32 SK_OPTS_NS::ix_and_ptr ( T **  ptr,
const SkRasterPipeline_GatherCtx ctx,
F  x,
F  y 
)

Definition at line 1987 of file SkRasterPipeline_opts.h.

1987 {
1988 // We use exclusive clamp so that our min value is > 0 because ULP subtraction using U32 would
1989 // produce a NaN if applied to +0.f.
1990 x = clamp_ex(x, ctx->width );
1991 y = clamp_ex(y, ctx->height);
1992 x = sk_bit_cast<F>(sk_bit_cast<U32>(x) - (uint32_t)ctx->roundDownAtInteger);
1993 y = sk_bit_cast<F>(sk_bit_cast<U32>(y) - (uint32_t)ctx->roundDownAtInteger);
1994 *ptr = (const T*)ctx->pixels;
1995 return trunc_(y)*ctx->stride + trunc_(x);
1996}
SI F clamp_ex(F v, float limit)

◆ just_return()

static void ABI SK_OPTS_NS::just_return ( Params ,
SkRasterPipelineStage ,
F  ,
F  ,
F  ,
F   
)
static

Definition at line 1626 of file SkRasterPipeline_opts.h.

1626{}

◆ lerp()

SI F SK_OPTS_NS::lerp ( F  from,
F  to,
F  t 
)

Definition at line 2652 of file SkRasterPipeline_opts.h.

2652 {
2653 return mad(to-from, t, from);
2654}

◆ load()

template<typename V , typename T >
SI V SK_OPTS_NS::load ( const T src)

Definition at line 1741 of file SkRasterPipeline_opts.h.

1741 {
1742 return sk_unaligned_load<V>(src);
1743}

◆ load2()

SI void SK_OPTS_NS::load2 ( const uint16_t *  ptr,
U16 r,
U16 g 
)

Definition at line 178 of file SkRasterPipeline_opts.h.

178 {
179 *r = ptr[0];
180 *g = ptr[1];
181 }

◆ load4() [1/2]

SI void SK_OPTS_NS::load4 ( const float *  ptr,
F r,
F g,
F b,
F a 
)

Definition at line 199 of file SkRasterPipeline_opts.h.

199 {
200 *r = ptr[0];
201 *g = ptr[1];
202 *b = ptr[2];
203 *a = ptr[3];
204 }

◆ load4() [2/2]

SI void SK_OPTS_NS::load4 ( const uint16_t *  ptr,
U16 r,
U16 g,
U16 b,
U16 a 
)

Definition at line 186 of file SkRasterPipeline_opts.h.

186 {
187 *r = ptr[0];
188 *g = ptr[1];
189 *b = ptr[2];
190 *a = ptr[3];
191 }

◆ lum()

SI F SK_OPTS_NS::lum ( F  r,
F  g,
F  b 
)

Definition at line 2253 of file SkRasterPipeline_opts.h.

2253{ return mad(r, 0.30f, mad(g, 0.59f, b*0.11f)); }

◆ mad()

SI F SK_OPTS_NS::mad ( F  f,
F  m,
F  a 
)

Definition at line 148 of file SkRasterPipeline_opts.h.

148{ return a+f*m; }

◆ matrix_multiply()

template<int N>
SI void SK_OPTS_NS::matrix_multiply ( SkRasterPipeline_MatrixMultiplyCtx packed,
std::byte *  base 
)

Definition at line 4786 of file SkRasterPipeline_opts.h.

4786 {
4787 auto ctx = SkRPCtxUtils::Unpack(packed);
4788
4789 int outColumns = ctx.rightColumns,
4790 outRows = ctx.leftRows;
4791
4792 SkASSERT(outColumns >= 1);
4793 SkASSERT(outRows >= 1);
4794 SkASSERT(outColumns <= 4);
4795 SkASSERT(outRows <= 4);
4796
4797 SkASSERT(ctx.leftColumns == ctx.rightRows);
4798 SkASSERT(N == ctx.leftColumns); // N should match the result width
4799
4800#if !defined(JUMPER_IS_SCALAR)
4801 // This prevents Clang from generating early-out checks for zero-sized matrices.
4802 SK_ASSUME(outColumns >= 1);
4803 SK_ASSUME(outRows >= 1);
4804 SK_ASSUME(outColumns <= 4);
4805 SK_ASSUME(outRows <= 4);
4806#endif
4807
4808 // Get pointers to the adjacent left- and right-matrices.
4809 F* resultMtx = (F*)(base + ctx.dst);
4810 F* leftMtx = &resultMtx[ctx.rightColumns * ctx.leftRows];
4811 F* rightMtx = &leftMtx[N * ctx.leftRows];
4812
4813 // Emit each matrix element.
4814 for (int c = 0; c < outColumns; ++c) {
4815 for (int r = 0; r < outRows; ++r) {
4816 // Dot a vector from leftMtx[*][r] with rightMtx[c][*].
4817 F* leftRow = &leftMtx [r];
4818 F* rightColumn = &rightMtx[c * N];
4819
4820 F element = *leftRow * *rightColumn;
4821 for (int idx = 1; idx < N; ++idx) {
4822 leftRow += outRows;
4823 rightColumn += 1;
4824 element = mad(*leftRow, *rightColumn, element);
4825 }
4826
4827 *resultMtx++ = element;
4828 }
4829 }
4830}

◆ max() [1/3]

SI F SK_OPTS_NS::max ( F  a,
F  b 
)

Definition at line 144 of file SkRasterPipeline_opts.h.

144{ return fmaxf(a,b); }

◆ max() [2/3]

SI I32 SK_OPTS_NS::max ( I32  a,
I32  b 
)

Definition at line 145 of file SkRasterPipeline_opts.h.

145{ return a > b ? a : b; }

◆ max() [3/3]

SI U32 SK_OPTS_NS::max ( U32  a,
U32  b 
)

Definition at line 146 of file SkRasterPipeline_opts.h.

146{ return a > b ? a : b; }

◆ max_fn()

template<typename T >
SI void SK_OPTS_NS::max_fn ( T dst,
T src 
)

Definition at line 4607 of file SkRasterPipeline_opts.h.

4607 {
4608 *dst = max(*dst, *src);
4609}

◆ memset16()

void SK_OPTS_NS::memset16 ( uint16_t  buffer[],
uint16_t  value,
int  count 
)
inline

Definition at line 38 of file SkMemset_opts.h.

38 {
39 memsetT(buffer, value, count);
40 }
static const uint8_t buffer[]
static void memsetT(T buffer[], T value, int count)

◆ memset32()

void SK_OPTS_NS::memset32 ( uint32_t  buffer[],
uint32_t  value,
int  count 
)
inline

Definition at line 41 of file SkMemset_opts.h.

41 {
42 memsetT(buffer, value, count);
43 }

◆ memset64()

void SK_OPTS_NS::memset64 ( uint64_t  buffer[],
uint64_t  value,
int  count 
)
inline

Definition at line 44 of file SkMemset_opts.h.

44 {
45 memsetT(buffer, value, count);
46 }

◆ memsetT()

template<typename T >
static void SK_OPTS_NS::memsetT ( T  buffer[],
T  value,
int  count 
)
static

Definition at line 17 of file SkMemset_opts.h.

17 {
18 #if defined(SK_CPU_SSE_LEVEL) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX
19 static constexpr int VecSize = 32 / sizeof(T);
20 #else
21 static constexpr int VecSize = 16 / sizeof(T);
22 #endif
23 static_assert(VecSize > 0, "T is too big for memsetT");
24 // Create a vectorized version of value
25 skvx::Vec<VecSize,T> wideValue(value);
26 while (count >= VecSize) {
27 // Copy the value into the destination buffer (VecSize elements at a time)
28 wideValue.store(buffer);
29 buffer += VecSize;
30 count -= VecSize;
31 }
32 // If count was not an even multiple of VecSize, take care of the last few.
33 while (count-- > 0) {
34 *buffer++ = value;
35 }
36 }
uint8_t value

◆ min() [1/3]

SI F SK_OPTS_NS::min ( F  a,
F  b 
)

Definition at line 141 of file SkRasterPipeline_opts.h.

141{ return fminf(a,b); }

◆ min() [2/3]

SI I32 SK_OPTS_NS::min ( I32  a,
I32  b 
)

Definition at line 142 of file SkRasterPipeline_opts.h.

142{ return a < b ? a : b; }

◆ min() [3/3]

SI U32 SK_OPTS_NS::min ( U32  a,
U32  b 
)

Definition at line 143 of file SkRasterPipeline_opts.h.

143{ return a < b ? a : b; }

◆ min_fn()

template<typename T >
SI void SK_OPTS_NS::min_fn ( T dst,
T src 
)

Definition at line 4612 of file SkRasterPipeline_opts.h.

4612 {
4613 *dst = min(*dst, *src);
4614}

◆ mix_fn() [1/2]

SI void SK_OPTS_NS::mix_fn ( F a,
F x,
F y 
)

Definition at line 4892 of file SkRasterPipeline_opts.h.

4892 {
4893 // We reorder the arguments here to match lerp's GLSL-style order (interpolation point last).
4894 *a = lerp(*x, *y, *a);
4895}
static SkPoint lerp(const SkPoint &a, const SkPoint &b, float T)

◆ mix_fn() [2/2]

SI void SK_OPTS_NS::mix_fn ( I32 a,
I32 x,
I32 y 
)

Definition at line 4897 of file SkRasterPipeline_opts.h.

4897 {
4898 // We reorder the arguments here to match if_then_else's expected order (y before x).
4899 *a = if_then_else(*a, *y, *x);
4900}

◆ mod_()

SI F SK_OPTS_NS::mod_ ( F  x,
float  y 
)

Definition at line 2565 of file SkRasterPipeline_opts.h.

2565 {
2566 return x - y * floor_(x * (1 / y));
2567}

◆ mod_fn()

SI void SK_OPTS_NS::mod_fn ( F dst,
F src 
)

Definition at line 4652 of file SkRasterPipeline_opts.h.

4652 {
4653 *dst = *dst - *src * floor_(*dst / *src);
4654}

◆ mul_fn()

template<typename T >
SI void SK_OPTS_NS::mul_fn ( T dst,
T src 
)

Definition at line 4580 of file SkRasterPipeline_opts.h.

4580 {
4581 *dst *= *src;
4582}

◆ nmad()

SI F SK_OPTS_NS::nmad ( F  f,
F  m,
F  a 
)

Definition at line 149 of file SkRasterPipeline_opts.h.

149{ return a-f*m; }

◆ pack() [1/2]

SI U8 SK_OPTS_NS::pack ( U16  v)

Definition at line 163 of file SkRasterPipeline_opts.h.

163{ return (U8)v; }

◆ pack() [2/2]

SI U16 SK_OPTS_NS::pack ( U32  v)

Definition at line 162 of file SkRasterPipeline_opts.h.

162{ return (U16)v; }

◆ patch_memory_contexts()

static void SK_OPTS_NS::patch_memory_contexts ( SkSpan< SkRasterPipeline_MemoryCtxPatch memoryCtxPatches,
size_t  dx,
size_t  dy,
size_t  tail 
)
static

Definition at line 1445 of file SkRasterPipeline_opts.h.

1446 {
1447 for (SkRasterPipeline_MemoryCtxPatch& patch : memoryCtxPatches) {
1448 SkRasterPipeline_MemoryCtx* ctx = patch.info.context;
1449
1450 const ptrdiff_t offset = patch.info.bytesPerPixel * (dy * ctx->stride + dx);
1451 if (patch.info.load) {
1452 void* ctxData = SkTAddOffset<void>(ctx->pixels, offset);
1453 memcpy(patch.scratch, ctxData, patch.info.bytesPerPixel * tail);
1454 }
1455
1456 SkASSERT(patch.backup == nullptr);
1457 void* scratchFakeBase = SkTAddOffset<void>(patch.scratch, -offset);
1458 patch.backup = ctx->pixels;
1459 ctx->pixels = scratchFakeBase;
1460 }
1461}
Point offset

◆ pow_fn()

SI void SK_OPTS_NS::pow_fn ( F dst,
F src 
)

Definition at line 4648 of file SkRasterPipeline_opts.h.

4648 {
4649 *dst = approx_powf(*dst, *src);
4650}
SI F approx_powf(F x, F y)

◆ ptr_at_xy()

template<typename T >
SI T * SK_OPTS_NS::ptr_at_xy ( const SkRasterPipeline_MemoryCtx ctx,
size_t  dx,
size_t  dy 
)

Definition at line 1820 of file SkRasterPipeline_opts.h.

1820 {
1821 return (T*)ctx->pixels + dy*ctx->stride + dx;
1822}

◆ raster_pipeline_highp_stride()

constexpr size_t SK_OPTS_NS::raster_pipeline_highp_stride ( )
constexpr

Definition at line 6609 of file SkRasterPipeline_opts.h.

6609{ return N; }

◆ raster_pipeline_lowp_stride()

constexpr size_t SK_OPTS_NS::raster_pipeline_lowp_stride ( )
constexpr

Allow outside code to access the Raster Pipeline pixel stride.

Definition at line 6608 of file SkRasterPipeline_opts.h.

6608{ return lowp::lowp_N; }

◆ rcp_approx()

SI F SK_OPTS_NS::rcp_approx ( F  v)

Definition at line 154 of file SkRasterPipeline_opts.h.

154{ return 1.0f / v; } // use rcp_fast instead

◆ rcp_fast()

SI F SK_OPTS_NS::rcp_fast ( F  v)

Definition at line 1483 of file SkRasterPipeline_opts.h.

1483{ return rcp_precise(v); }

◆ rcp_precise()

SI F SK_OPTS_NS::rcp_precise ( F  v)

Definition at line 157 of file SkRasterPipeline_opts.h.

157{ return 1.0f / v; }

◆ rect_memset16()

void SK_OPTS_NS::rect_memset16 ( uint16_t  buffer[],
uint16_t  value,
int  count,
size_t  rowBytes,
int  height 
)
inline

Definition at line 56 of file SkMemset_opts.h.

57 {
58 rect_memsetT(buffer, value, count, rowBytes, height);
59 }
static void rect_memsetT(T buffer[], T value, int count, size_t rowBytes, int height)

◆ rect_memset32()

void SK_OPTS_NS::rect_memset32 ( uint32_t  buffer[],
uint32_t  value,
int  count,
size_t  rowBytes,
int  height 
)
inline

Definition at line 60 of file SkMemset_opts.h.

61 {
62 rect_memsetT(buffer, value, count, rowBytes, height);
63 }

◆ rect_memset64()

void SK_OPTS_NS::rect_memset64 ( uint64_t  buffer[],
uint64_t  value,
int  count,
size_t  rowBytes,
int  height 
)
inline

Definition at line 64 of file SkMemset_opts.h.

65 {
66 rect_memsetT(buffer, value, count, rowBytes, height);
67 }

◆ rect_memsetT()

template<typename T >
static void SK_OPTS_NS::rect_memsetT ( T  buffer[],
T  value,
int  count,
size_t  rowBytes,
int  height 
)
static

Definition at line 49 of file SkMemset_opts.h.

49 {
50 while (height --> 0) {
51 memsetT(buffer, value, count);
52 buffer = (T*)((char*)buffer + rowBytes);
53 }
54 }

◆ restore_memory_contexts()

static void SK_OPTS_NS::restore_memory_contexts ( SkSpan< SkRasterPipeline_MemoryCtxPatch memoryCtxPatches,
size_t  dx,
size_t  dy,
size_t  tail 
)
static

Definition at line 1463 of file SkRasterPipeline_opts.h.

1464 {
1465 for (SkRasterPipeline_MemoryCtxPatch& patch : memoryCtxPatches) {
1466 SkRasterPipeline_MemoryCtx* ctx = patch.info.context;
1467
1468 SkASSERT(patch.backup != nullptr);
1469 ctx->pixels = patch.backup;
1470 patch.backup = nullptr;
1471
1472 const ptrdiff_t offset = patch.info.bytesPerPixel * (dy * ctx->stride + dx);
1473 if (patch.info.store) {
1474 void* ctxData = SkTAddOffset<void>(ctx->pixels, offset);
1475 memcpy(ctxData, patch.scratch, patch.info.bytesPerPixel * tail);
1476 }
1477 }
1478}

◆ round() [1/2]

SI U32 SK_OPTS_NS::round ( F  v)

Definition at line 160 of file SkRasterPipeline_opts.h.

160{ return (U32)(v + 0.5f); }

◆ round() [2/2]

SI U32 SK_OPTS_NS::round ( F  v,
F  scale 
)

Definition at line 161 of file SkRasterPipeline_opts.h.

161{ return (U32)(v*scale + 0.5f); }
const Scalar scale

◆ rsqrt()

SI F SK_OPTS_NS::rsqrt ( F  v)

Definition at line 1484 of file SkRasterPipeline_opts.h.

1484{ return rcp_precise(sqrt_(v)); }

◆ rsqrt_approx()

SI F SK_OPTS_NS::rsqrt_approx ( F  v)

Definition at line 155 of file SkRasterPipeline_opts.h.

155{ return 1.0f / sqrtf(v); }

◆ S32_alpha_D32_filter_DX()

void SK_OPTS_NS::S32_alpha_D32_filter_DX ( const SkBitmapProcState s,
const uint32_t *  xy,
int  count,
uint32_t *  colors 
)
inline

Definition at line 47 of file SkBitmapProcState_opts.h.

48 {
49 SkASSERT(count > 0 && colors != nullptr);
50 SkASSERT(s.fBilerp);
51 SkASSERT(kN32_SkColorType == s.fPixmap.colorType());
52 SkASSERT(s.fAlphaScale <= 256);
53
54 // interpolate_in_x() is the crux of the SSSE3 implementation,
55 // interpolating in X for up to two output pixels (A and B) using _mm_maddubs_epi16().
56 auto interpolate_in_x = [](uint32_t A0, uint32_t A1,
57 uint32_t B0, uint32_t B1,
58 __m128i interlaced_x_weights) {
59 // _mm_maddubs_epi16() is a little idiosyncratic, but great as the core of a lerp.
60 //
61 // It takes two arguments interlaced byte-wise:
62 // - first arg: [ l,r, ... 7 more pairs of unsigned 8-bit values ...]
63 // - second arg: [ w,W, ... 7 more pairs of signed 8-bit values ...]
64 // and returns 8 signed 16-bit values: [ l*w + r*W, ... 7 more ... ].
65 //
66 // That's why we go to all this trouble to make interlaced_x_weights,
67 // and here we're about to interlace A0 with A1 and B0 with B1 to match.
68 //
69 // Our interlaced_x_weights are all in [0,16], and so we need not worry about
70 // the signedness of that input nor about the signedness of the output.
71
72 __m128i interlaced_A = _mm_unpacklo_epi8(_mm_cvtsi32_si128(A0), _mm_cvtsi32_si128(A1)),
73 interlaced_B = _mm_unpacklo_epi8(_mm_cvtsi32_si128(B0), _mm_cvtsi32_si128(B1));
74
75 return _mm_maddubs_epi16(_mm_unpacklo_epi64(interlaced_A, interlaced_B),
76 interlaced_x_weights);
77 };
78
79 // Interpolate {A0..A3} --> output pixel A, and {B0..B3} --> output pixel B.
80 // Returns two pixels, with each color channel in a 16-bit lane of the __m128i.
81 auto interpolate_in_x_and_y = [&](uint32_t A0, uint32_t A1,
82 uint32_t A2, uint32_t A3,
83 uint32_t B0, uint32_t B1,
84 uint32_t B2, uint32_t B3,
85 __m128i interlaced_x_weights,
86 int wy) {
87 // Interpolate each row in X, leaving 16-bit lanes scaled by interlaced_x_weights.
88 __m128i top = interpolate_in_x(A0,A1, B0,B1, interlaced_x_weights),
89 bot = interpolate_in_x(A2,A3, B2,B3, interlaced_x_weights);
90
91 // Interpolate in Y. As in the SSE2 code, we calculate top*(16-wy) + bot*wy
92 // as 16*top + (bot-top)*wy to save a multiply.
93 __m128i px = _mm_add_epi16(_mm_slli_epi16(top, 4),
94 _mm_mullo_epi16(_mm_sub_epi16(bot, top),
95 _mm_set1_epi16(wy)));
96
97 // Scale down by total max weight 16x16 = 256.
98 px = _mm_srli_epi16(px, 8);
99
100 // Scale by alpha if needed.
101 if (s.fAlphaScale < 256) {
102 px = _mm_srli_epi16(_mm_mullo_epi16(px, _mm_set1_epi16(s.fAlphaScale)), 8);
103 }
104 return px;
105 };
106
107 // We're in _DX mode here, so we're only varying in X.
108 // That means the first entry of xy is our constant pair of Y coordinates and weight in Y.
109 // All the other entries in xy will be pairs of X coordinates and the X weight.
110 int y0, y1, wy;
111 decode_packed_coordinates_and_weight(*xy++, &y0, &y1, &wy);
112
113 auto row0 = (const uint32_t*)((const uint8_t*)s.fPixmap.addr() + y0 * s.fPixmap.rowBytes()),
114 row1 = (const uint32_t*)((const uint8_t*)s.fPixmap.addr() + y1 * s.fPixmap.rowBytes());
115
116 while (count >= 4) {
117 // We can really get going, loading 4 X-pairs at a time to produce 4 output pixels.
118 int x0[4],
119 x1[4];
120 __m128i wx;
121
122 // decode_packed_coordinates_and_weight(), 4x.
123 __m128i packed = _mm_loadu_si128((const __m128i*)xy);
124 _mm_storeu_si128((__m128i*)x0, _mm_srli_epi32(packed, 18));
125 _mm_storeu_si128((__m128i*)x1, _mm_and_si128 (packed, _mm_set1_epi32(0x3fff)));
126 wx = _mm_and_si128(_mm_srli_epi32(packed, 14), _mm_set1_epi32(0xf)); // [0,15]
127
128 // Splat each x weight 4x (for each color channel) as wr for pixels on the right at x1,
129 // and sixteen minus that as wl for pixels on the left at x0.
130 __m128i wr = _mm_shuffle_epi8(wx, _mm_setr_epi8(0,0,0,0,4,4,4,4,8,8,8,8,12,12,12,12)),
131 wl = _mm_sub_epi8(_mm_set1_epi8(16), wr);
132
133 // We need to interlace wl and wr for _mm_maddubs_epi16().
134 __m128i interlaced_x_weights_AB = _mm_unpacklo_epi8(wl,wr),
135 interlaced_x_weights_CD = _mm_unpackhi_epi8(wl,wr);
136
137 enum { A,B,C,D };
138
139 // interpolate_in_x_and_y() can produce two output pixels (A and B) at a time
140 // from eight input pixels {A0..A3} and {B0..B3}, arranged in a 2x2 grid for each.
141 __m128i AB = interpolate_in_x_and_y(row0[x0[A]], row0[x1[A]],
142 row1[x0[A]], row1[x1[A]],
143 row0[x0[B]], row0[x1[B]],
144 row1[x0[B]], row1[x1[B]],
145 interlaced_x_weights_AB, wy);
146
147 // Once more with the other half of the x-weights for two more pixels C,D.
148 __m128i CD = interpolate_in_x_and_y(row0[x0[C]], row0[x1[C]],
149 row1[x0[C]], row1[x1[C]],
150 row0[x0[D]], row0[x1[D]],
151 row1[x0[D]], row1[x1[D]],
152 interlaced_x_weights_CD, wy);
153
154 // Scale by alpha, pack back together to 8-bit lanes, and write out four pixels!
155 _mm_storeu_si128((__m128i*)colors, _mm_packus_epi16(AB, CD));
156 xy += 4;
157 colors += 4;
158 count -= 4;
159 }
160
161 while (count --> 0) {
162 // This is exactly the same flow as the count >= 4 loop above, but writing one pixel.
163 int x0, x1, wx;
164 decode_packed_coordinates_and_weight(*xy++, &x0, &x1, &wx);
165
166 // As above, splat out wx four times as wr, and sixteen minus that as wl.
167 __m128i wr = _mm_set1_epi8(wx), // This splats it out 16 times, but that's fine.
168 wl = _mm_sub_epi8(_mm_set1_epi8(16), wr);
169
170 __m128i interlaced_x_weights = _mm_unpacklo_epi8(wl, wr);
171
172 __m128i A = interpolate_in_x_and_y(row0[x0], row0[x1],
173 row1[x0], row1[x1],
174 0, 0,
175 0, 0,
176 interlaced_x_weights, wy);
177
178 *colors++ = _mm_cvtsi128_si32(_mm_packus_epi16(A, _mm_setzero_si128()));
179 }
180 }
static void B2(DFData *curr, int width)
static void B1(DFData *curr, int width)
#define C(TEST_CATEGORY)
Definition colrv1.cpp:247
#define B
static void decode_packed_coordinates_and_weight(U32 packed, Out *v0, Out *v1, Out *w)
PODArray< SkColor > colors
Definition SkRecords.h:276

◆ sat()

SI F SK_OPTS_NS::sat ( F  r,
F  g,
F  b 
)

Definition at line 2252 of file SkRasterPipeline_opts.h.

2252{ return max(r, max(g,b)) - min(r, min(g,b)); }

◆ save_xy()

SI void SK_OPTS_NS::save_xy ( F r,
F g,
SkRasterPipeline_SamplerCtx c 
)

Definition at line 3565 of file SkRasterPipeline_opts.h.

3565 {
3566 // Whether bilinear or bicubic, all sample points are at the same fractional offset (fx,fy).
3567 // They're either the 4 corners of a logical 1x1 pixel or the 16 corners of a 3x3 grid
3568 // surrounding (x,y) at (0.5,0.5) off-center.
3569 F fx = fract(*r + 0.5f),
3570 fy = fract(*g + 0.5f);
3571
3572 // Samplers will need to load x and fx, or y and fy.
3573 sk_unaligned_store(c->x, *r);
3574 sk_unaligned_store(c->y, *g);
3575 sk_unaligned_store(c->fx, fx);
3576 sk_unaligned_store(c->fy, fy);
3577}

◆ scatter_masked()

SI void SK_OPTS_NS::scatter_masked ( I32  src,
int dst,
U32  ix,
I32  mask 
)

Definition at line 174 of file SkRasterPipeline_opts.h.

174 {
175 dst[ix] = mask ? src : dst[ix];
176 }

◆ select_lane() [1/2]

SI int32_t SK_OPTS_NS::select_lane ( int32_t  data,
int   
)

Definition at line 2024 of file SkRasterPipeline_opts.h.

2024{ return data; }

◆ select_lane() [2/2]

SI uint32_t SK_OPTS_NS::select_lane ( uint32_t  data,
int   
)

Definition at line 2023 of file SkRasterPipeline_opts.h.

2023{ return data; }

◆ set_lum()

SI void SK_OPTS_NS::set_lum ( F r,
F g,
F b,
F  l 
)

Definition at line 2266 of file SkRasterPipeline_opts.h.

2266 {
2267 F diff = l - lum(*r, *g, *b);
2268 *r += diff;
2269 *g += diff;
2270 *b += diff;
2271}

◆ set_sat()

SI void SK_OPTS_NS::set_sat ( F r,
F g,
F b,
F  s 
)

Definition at line 2255 of file SkRasterPipeline_opts.h.

2255 {
2256 F mn = min(*r, min(*g,*b)),
2257 mx = max(*r, max(*g,*b)),
2258 sat = mx - mn;
2259
2260 // Map min channel to 0, max channel to s, and scale the middle proportionally.
2261 s = if_then_else(sat == 0.0f, 0.0f, s * rcp_fast(sat));
2262 *r = (*r - mn) * s;
2263 *g = (*g - mn) * s;
2264 *b = (*b - mn) * s;
2265}
static float sat(float r, float g, float b)
Definition hsl.cpp:51

◆ shuffle_fn()

template<int LoopCount, typename OffsetType >
SI void SK_OPTS_NS::shuffle_fn ( std::byte *  ptr,
OffsetType *  offsets,
int  numSlots 
)

Definition at line 4195 of file SkRasterPipeline_opts.h.

4195 {
4196 F scratch[16];
4197 SK_UNROLL for (int count = 0; count < LoopCount; ++count) {
4198 scratch[count] = *(F*)(ptr + offsets[count]);
4199 }
4200 // Surprisingly, this switch generates significantly better code than a memcpy (on x86-64) when
4201 // the number of slots is unknown at compile time, and generates roughly identical code when the
4202 // number of slots is hardcoded. Using a switch allows `scratch` to live in ymm0-ymm15 instead
4203 // of being written out to the stack and then read back in. Also, the intrinsic memcpy assumes
4204 // that `numSlots` could be arbitrarily large, and so it emits more code than we need.
4205 F* dst = (F*)ptr;
4206 switch (numSlots) {
4207 case 16: dst[15] = scratch[15]; [[fallthrough]];
4208 case 15: dst[14] = scratch[14]; [[fallthrough]];
4209 case 14: dst[13] = scratch[13]; [[fallthrough]];
4210 case 13: dst[12] = scratch[12]; [[fallthrough]];
4211 case 12: dst[11] = scratch[11]; [[fallthrough]];
4212 case 11: dst[10] = scratch[10]; [[fallthrough]];
4213 case 10: dst[ 9] = scratch[ 9]; [[fallthrough]];
4214 case 9: dst[ 8] = scratch[ 8]; [[fallthrough]];
4215 case 8: dst[ 7] = scratch[ 7]; [[fallthrough]];
4216 case 7: dst[ 6] = scratch[ 6]; [[fallthrough]];
4217 case 6: dst[ 5] = scratch[ 5]; [[fallthrough]];
4218 case 5: dst[ 4] = scratch[ 4]; [[fallthrough]];
4219 case 4: dst[ 3] = scratch[ 3]; [[fallthrough]];
4220 case 3: dst[ 2] = scratch[ 2]; [[fallthrough]];
4221 case 2: dst[ 1] = scratch[ 1]; [[fallthrough]];
4222 case 1: dst[ 0] = scratch[ 0];
4223 }
4224}

◆ sin5q_()

SI F SK_OPTS_NS::sin5q_ ( F  x)

Definition at line 1839 of file SkRasterPipeline_opts.h.

1839 {
1840 // A * x + B * x^3 + C * x^5
1841 // Exact at x = 0, 1/12, 1/6, 1/4, and their negatives,
1842 // which correspond to x * 2 * pi = 0, pi/6, pi/3, pi/2
1843 constexpr float A = 6.28230858f;
1844 constexpr float B = -41.1693687f;
1845 constexpr float C = 74.4388885f;
1846 F x2 = x * x;
1847 return x * mad(mad(x2, C, B), x2, A);
1848}

◆ sin_()

SI F SK_OPTS_NS::sin_ ( F  x)

Definition at line 1850 of file SkRasterPipeline_opts.h.

1850 {
1851 constexpr float one_over_pi2 = 1 / (2 * SK_FloatPI);
1852 x = mad(x, -one_over_pi2, 0.25f);
1853 x = 0.25f - abs_(x - floor_(x + 0.5f));
1854 return sin5q_(x);
1855}

◆ SkAlphaMul_lsx()

static __m128i SK_OPTS_NS::SkAlphaMul_lsx ( __m128i  x,
__m128i  y 
)
static

Definition at line 167 of file SkBlitMask_opts.h.

167 {
168 __m128i tmp = __lsx_vmul_h(x, y);
169 __m128i mask = __lsx_vreplgr2vr_h(0xff00);
170 return __lsx_vsrlri_h(__lsx_vand_v(tmp, mask), 8);
171 }

◆ small_swizzle_fn()

template<int N>
SI void SK_OPTS_NS::small_swizzle_fn ( SkRasterPipeline_SwizzleCtx packed,
std::byte *  base 
)

Definition at line 4227 of file SkRasterPipeline_opts.h.

4227 {
4228 auto ctx = SkRPCtxUtils::Unpack(packed);
4229 shuffle_fn<N>(base + ctx.dst, ctx.offsets, N);
4230}

◆ smoothstep_fn()

SI void SK_OPTS_NS::smoothstep_fn ( F edge0,
F edge1,
F x 
)

Definition at line 4902 of file SkRasterPipeline_opts.h.

4902 {
4903 F t = clamp_01_((*x - *edge0) / (*edge1 - *edge0));
4904 *edge0 = t * t * (3.0 - 2.0 * t);
4905}

◆ sqrt_()

SI F SK_OPTS_NS::sqrt_ ( F  v)

Definition at line 156 of file SkRasterPipeline_opts.h.

156{ return sqrtf(v); }

◆ stack_checkpoint()

static void ABI SK_OPTS_NS::stack_checkpoint ( Params params,
SkRasterPipelineStage program,
F  r,
F  g,
F  b,
F  a 
)
static

Definition at line 1658 of file SkRasterPipeline_opts.h.

1659 {
1660 SkRasterPipeline_RewindCtx* ctx = Ctx{program};
1661 while (program) {
1662 auto next = (Stage)(++program)->fn;
1663
1664 ctx->stage = nullptr;
1665 next(params, program, r, g, b, a);
1666 program = ctx->stage;
1667
1668 if (program) {
1669 r = sk_unaligned_load<F>(ctx->r );
1670 g = sk_unaligned_load<F>(ctx->g );
1671 b = sk_unaligned_load<F>(ctx->b );
1672 a = sk_unaligned_load<F>(ctx->a );
1673 params->dr = sk_unaligned_load<F>(ctx->dr);
1674 params->dg = sk_unaligned_load<F>(ctx->dg);
1675 params->db = sk_unaligned_load<F>(ctx->db);
1676 params->da = sk_unaligned_load<F>(ctx->da);
1677 params->base = ctx->base;
1678 }
1679 }
1680 }
static float next(float f)
const EmbeddedViewParams * params
float dg[SkRasterPipeline_kMaxStride_highp]
float g[SkRasterPipeline_kMaxStride_highp]
float dr[SkRasterPipeline_kMaxStride_highp]
float db[SkRasterPipeline_kMaxStride_highp]
float a[SkRasterPipeline_kMaxStride_highp]
float r[SkRasterPipeline_kMaxStride_highp]
float da[SkRasterPipeline_kMaxStride_highp]
float b[SkRasterPipeline_kMaxStride_highp]

◆ stack_rewind()

static void ABI SK_OPTS_NS::stack_rewind ( Params params,
SkRasterPipelineStage program,
F  r,
F  g,
F  b,
F  a 
)
static

Definition at line 1681 of file SkRasterPipeline_opts.h.

1682 {
1683 SkRasterPipeline_RewindCtx* ctx = Ctx{program};
1684 sk_unaligned_store(ctx->r , r );
1685 sk_unaligned_store(ctx->g , g );
1686 sk_unaligned_store(ctx->b , b );
1687 sk_unaligned_store(ctx->a , a );
1688 sk_unaligned_store(ctx->dr, params->dr);
1689 sk_unaligned_store(ctx->dg, params->dg);
1690 sk_unaligned_store(ctx->db, params->db);
1691 sk_unaligned_store(ctx->da, params->da);
1692 ctx->base = params->base;
1693 ctx->stage = program;
1694 }

◆ STAGE() [1/188]

SK_OPTS_NS::STAGE ( accumulate  ,
const SkRasterPipeline_SamplerCtx c 
)

Definition at line 3579 of file SkRasterPipeline_opts.h.

3579 {
3580 // Bilinear and bicubic filters are both separable, so we produce independent contributions
3581 // from x and y, multiplying them together here to get each pixel's total scale factor.
3582 auto scale = sk_unaligned_load<F>(c->scalex)
3583 * sk_unaligned_load<F>(c->scaley);
3584 dr = mad(scale, r, dr);
3585 dg = mad(scale, g, dg);
3586 db = mad(scale, b, db);
3587 da = mad(scale, a, da);
3588}

◆ STAGE() [2/188]

SK_OPTS_NS::STAGE ( alpha_to_gray  ,
NoCtx   
)

Definition at line 3308 of file SkRasterPipeline_opts.h.

3308 {
3309 r = g = b = a;
3310 a = F1;
3311}
static constexpr F F1

◆ STAGE() [3/188]

SK_OPTS_NS::STAGE ( alpha_to_gray_dst  ,
NoCtx   
)

Definition at line 3312 of file SkRasterPipeline_opts.h.

3312 {
3313 dr = dg = db = da;
3314 da = F1;
3315}

◆ STAGE() [4/188]

SK_OPTS_NS::STAGE ( alpha_to_red  ,
NoCtx   
)

Definition at line 3316 of file SkRasterPipeline_opts.h.

3316 {
3317 r = a;
3318 a = F1;
3319}

◆ STAGE() [5/188]

SK_OPTS_NS::STAGE ( alpha_to_red_dst  ,
NoCtx   
)

Definition at line 3320 of file SkRasterPipeline_opts.h.

3320 {
3321 dr = da;
3322 da = F1;
3323}

◆ STAGE() [6/188]

SK_OPTS_NS::STAGE ( alter_2pt_conical_compensate_focal  ,
const SkRasterPipeline_2PtConicalCtx ctx 
)

Definition at line 3533 of file SkRasterPipeline_opts.h.

3533 {
3534 F& t = r;
3535 t = t + ctx->fP1; // ctx->fP1 = f
3536}

◆ STAGE() [7/188]

SK_OPTS_NS::STAGE ( alter_2pt_conical_unswap  ,
NoCtx   
)

Definition at line 3538 of file SkRasterPipeline_opts.h.

3538 {
3539 F& t = r;
3540 t = 1 - t;
3541}

◆ STAGE() [8/188]

SK_OPTS_NS::STAGE ( apply_vector_mask  ,
const uint32_t *  ctx 
)

Definition at line 3557 of file SkRasterPipeline_opts.h.

3557 {
3558 const U32 mask = sk_unaligned_load<U32>(ctx);
3559 r = sk_bit_cast<F>(sk_bit_cast<U32>(r) & mask);
3560 g = sk_bit_cast<F>(sk_bit_cast<U32>(g) & mask);
3561 b = sk_bit_cast<F>(sk_bit_cast<U32>(b) & mask);
3562 a = sk_bit_cast<F>(sk_bit_cast<U32>(a) & mask);
3563}

◆ STAGE() [9/188]

SK_OPTS_NS::STAGE ( bicubic_clamp_8888  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 4995 of file SkRasterPipeline_opts.h.

4995 {
4996 // (cx,cy) are the center of our sample.
4997 F cx = r,
4998 cy = g;
4999
5000 // All sample points are at the same fractional offset (fx,fy).
5001 // They're the 4 corners of a logical 1x1 pixel surrounding (x,y) at (0.5,0.5) offsets.
5002 F fx = fract(cx + 0.5f),
5003 fy = fract(cy + 0.5f);
5004
5005 // We'll accumulate the color of all four samples into {r,g,b,a} directly.
5006 r = g = b = a = F0;
5007
5008 const float* w = ctx->weights;
5009 const F scaley[4] = {bicubic_wts(fy, w[0], w[4], w[ 8], w[12]),
5010 bicubic_wts(fy, w[1], w[5], w[ 9], w[13]),
5011 bicubic_wts(fy, w[2], w[6], w[10], w[14]),
5012 bicubic_wts(fy, w[3], w[7], w[11], w[15])};
5013 const F scalex[4] = {bicubic_wts(fx, w[0], w[4], w[ 8], w[12]),
5014 bicubic_wts(fx, w[1], w[5], w[ 9], w[13]),
5015 bicubic_wts(fx, w[2], w[6], w[10], w[14]),
5016 bicubic_wts(fx, w[3], w[7], w[11], w[15])};
5017
5018 F sample_y = cy - 1.5f;
5019 for (int yy = 0; yy <= 3; ++yy) {
5020 F sample_x = cx - 1.5f;
5021 for (int xx = 0; xx <= 3; ++xx) {
5022 F scale = scalex[xx] * scaley[yy];
5023
5024 // ix_and_ptr() will clamp to the image's bounds for us.
5025 const uint32_t* ptr;
5026 U32 ix = ix_and_ptr(&ptr, ctx, sample_x, sample_y);
5027
5028 F sr,sg,sb,sa;
5029 from_8888(gather(ptr, ix), &sr,&sg,&sb,&sa);
5030
5031 r = mad(scale, sr, r);
5032 g = mad(scale, sg, g);
5033 b = mad(scale, sb, b);
5034 a = mad(scale, sa, a);
5035
5036 sample_x += 1;
5037 }
5038 sample_y += 1;
5039 }
5040}
SI void from_8888(U32 _8888, F *r, F *g, F *b, F *a)
SI U32 ix_and_ptr(T **ptr, const SkRasterPipeline_GatherCtx *ctx, F x, F y)
SI F bicubic_wts(F t, float A, float B, float C, float D)

◆ STAGE() [10/188]

SK_OPTS_NS::STAGE ( bicubic_n1x  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3683 of file SkRasterPipeline_opts.h.

3683{ bicubic_x<-1>(ctx, &r); }
SI void bicubic_x(SkRasterPipeline_SamplerCtx *ctx, F *x)

◆ STAGE() [11/188]

SK_OPTS_NS::STAGE ( bicubic_n1y  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3688 of file SkRasterPipeline_opts.h.

3688{ bicubic_y<-1>(ctx, &g); }
SI void bicubic_y(SkRasterPipeline_SamplerCtx *ctx, F *y)

◆ STAGE() [12/188]

SK_OPTS_NS::STAGE ( bicubic_n3x  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3682 of file SkRasterPipeline_opts.h.

3682{ bicubic_x<-3>(ctx, &r); }

◆ STAGE() [13/188]

SK_OPTS_NS::STAGE ( bicubic_n3y  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3687 of file SkRasterPipeline_opts.h.

3687{ bicubic_y<-3>(ctx, &g); }

◆ STAGE() [14/188]

SK_OPTS_NS::STAGE ( bicubic_p1x  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3684 of file SkRasterPipeline_opts.h.

3684{ bicubic_x<+1>(ctx, &r); }

◆ STAGE() [15/188]

SK_OPTS_NS::STAGE ( bicubic_p1y  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3689 of file SkRasterPipeline_opts.h.

3689{ bicubic_y<+1>(ctx, &g); }

◆ STAGE() [16/188]

SK_OPTS_NS::STAGE ( bicubic_p3x  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3685 of file SkRasterPipeline_opts.h.

3685{ bicubic_x<+3>(ctx, &r); }

◆ STAGE() [17/188]

SK_OPTS_NS::STAGE ( bicubic_p3y  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3690 of file SkRasterPipeline_opts.h.

3690{ bicubic_y<+3>(ctx, &g); }

◆ STAGE() [18/188]

SK_OPTS_NS::STAGE ( bicubic_setup  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3661 of file SkRasterPipeline_opts.h.

3661 {
3662 save_xy(&r, &g, ctx);
3663
3664 const float* w = ctx->weights;
3665
3666 F fx = sk_unaligned_load<F>(ctx->fx);
3667 sk_unaligned_store(ctx->wx[0], bicubic_wts(fx, w[0], w[4], w[ 8], w[12]));
3668 sk_unaligned_store(ctx->wx[1], bicubic_wts(fx, w[1], w[5], w[ 9], w[13]));
3669 sk_unaligned_store(ctx->wx[2], bicubic_wts(fx, w[2], w[6], w[10], w[14]));
3670 sk_unaligned_store(ctx->wx[3], bicubic_wts(fx, w[3], w[7], w[11], w[15]));
3671
3672 F fy = sk_unaligned_load<F>(ctx->fy);
3673 sk_unaligned_store(ctx->wy[0], bicubic_wts(fy, w[0], w[4], w[ 8], w[12]));
3674 sk_unaligned_store(ctx->wy[1], bicubic_wts(fy, w[1], w[5], w[ 9], w[13]));
3675 sk_unaligned_store(ctx->wy[2], bicubic_wts(fy, w[2], w[6], w[10], w[14]));
3676 sk_unaligned_store(ctx->wy[3], bicubic_wts(fy, w[3], w[7], w[11], w[15]));
3677
3678 // Init for accumulate
3679 dr = dg = db = da = F0;
3680}
SI void save_xy(F *r, F *g, SkRasterPipeline_SamplerCtx *c)

◆ STAGE() [19/188]

SK_OPTS_NS::STAGE ( bilerp_clamp_8888  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 4953 of file SkRasterPipeline_opts.h.

4953 {
4954 // (cx,cy) are the center of our sample.
4955 F cx = r,
4956 cy = g;
4957
4958 // All sample points are at the same fractional offset (fx,fy).
4959 // They're the 4 corners of a logical 1x1 pixel surrounding (x,y) at (0.5,0.5) offsets.
4960 F fx = fract(cx + 0.5f),
4961 fy = fract(cy + 0.5f);
4962
4963 // We'll accumulate the color of all four samples into {r,g,b,a} directly.
4964 r = g = b = a = F0;
4965
4966 for (float py = -0.5f; py <= +0.5f; py += 1.0f)
4967 for (float px = -0.5f; px <= +0.5f; px += 1.0f) {
4968 // (x,y) are the coordinates of this sample point.
4969 F x = cx + px,
4970 y = cy + py;
4971
4972 // ix_and_ptr() will clamp to the image's bounds for us.
4973 const uint32_t* ptr;
4974 U32 ix = ix_and_ptr(&ptr, ctx, x,y);
4975
4976 F sr,sg,sb,sa;
4977 from_8888(gather(ptr, ix), &sr,&sg,&sb,&sa);
4978
4979 // In bilinear interpolation, the 4 pixels at +/- 0.5 offsets from the sample pixel center
4980 // are combined in direct proportion to their area overlapping that logical query pixel.
4981 // At positive offsets, the x-axis contribution to that rectangle is fx,
4982 // or (1-fx) at negative x. Same deal for y.
4983 F sx = (px > 0) ? fx : 1.0f - fx,
4984 sy = (py > 0) ? fy : 1.0f - fy,
4985 area = sx * sy;
4986
4987 r += sr * area;
4988 g += sg * area;
4989 b += sb * area;
4990 a += sa * area;
4991 }
4992}

◆ STAGE() [20/188]

SK_OPTS_NS::STAGE ( bilinear_nx  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3622 of file SkRasterPipeline_opts.h.

3622{ bilinear_x<-1>(ctx, &r); }
SI void bilinear_x(SkRasterPipeline_SamplerCtx *ctx, F *x)

◆ STAGE() [21/188]

SK_OPTS_NS::STAGE ( bilinear_ny  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3624 of file SkRasterPipeline_opts.h.

3624{ bilinear_y<-1>(ctx, &g); }
SI void bilinear_y(SkRasterPipeline_SamplerCtx *ctx, F *y)

◆ STAGE() [22/188]

SK_OPTS_NS::STAGE ( bilinear_px  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3623 of file SkRasterPipeline_opts.h.

3623{ bilinear_x<+1>(ctx, &r); }

◆ STAGE() [23/188]

SK_OPTS_NS::STAGE ( bilinear_py  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3625 of file SkRasterPipeline_opts.h.

3625{ bilinear_y<+1>(ctx, &g); }

◆ STAGE() [24/188]

SK_OPTS_NS::STAGE ( bilinear_setup  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3616 of file SkRasterPipeline_opts.h.

3616 {
3617 save_xy(&r, &g, ctx);
3618 // Init for accumulate
3619 dr = dg = db = da = F0;
3620}

◆ STAGE() [25/188]

SK_OPTS_NS::STAGE ( black_color  ,
NoCtx   
)

Definition at line 2105 of file SkRasterPipeline_opts.h.

2105 {
2106 r = g = b = F0;
2107 a = F1;
2108}

◆ STAGE() [26/188]

SK_OPTS_NS::STAGE ( bt709_luminance_or_luma_to_alpha  ,
NoCtx   
)

Definition at line 3325 of file SkRasterPipeline_opts.h.

3325 {
3326 a = r*0.2126f + g*0.7152f + b*0.0722f;
3327 r = g = b = F0;
3328}

◆ STAGE() [27/188]

SK_OPTS_NS::STAGE ( bt709_luminance_or_luma_to_rgb  ,
NoCtx   
)

Definition at line 3329 of file SkRasterPipeline_opts.h.

3329 {
3330 r = g = b = r*0.2126f + g*0.7152f + b*0.0722f;
3331}

◆ STAGE() [28/188]

SK_OPTS_NS::STAGE ( byte_tables  ,
const SkRasterPipeline_TablesCtx tables 
)

Definition at line 2713 of file SkRasterPipeline_opts.h.

2713 {
2714 r = from_byte(gather(tables->r, to_unorm(r, 255)));
2715 g = from_byte(gather(tables->g, to_unorm(g, 255)));
2716 b = from_byte(gather(tables->b, to_unorm(b, 255)));
2717 a = from_byte(gather(tables->a, to_unorm(a, 255)));
2718}
SI U32 to_unorm(F v, float scale, float bias=1.0f)

◆ STAGE() [29/188]

SK_OPTS_NS::STAGE ( callback  ,
SkRasterPipeline_CallbackCtx c 
)

Definition at line 3832 of file SkRasterPipeline_opts.h.

3832 {
3833 store4(c->rgba, r,g,b,a);
3834 c->fn(c, N);
3835 load4(c->read_from, &r,&g,&b,&a);
3836}
SI void load4(const uint16_t *ptr, U16 *r, U16 *g, U16 *b, U16 *a)
SI void store4(uint16_t *ptr, U16 r, U16 g, U16 b, U16 a)
float rgba[4 *SkRasterPipeline_kMaxStride_highp]
void(* fn)(SkRasterPipeline_CallbackCtx *self, int active_pixels)

◆ STAGE() [30/188]

SK_OPTS_NS::STAGE ( check_decal_mask  ,
SkRasterPipeline_DecalTileCtx ctx 
)

Definition at line 3300 of file SkRasterPipeline_opts.h.

3300 {
3301 auto mask = sk_unaligned_load<U32>(ctx->mask);
3302 r = sk_bit_cast<F>(sk_bit_cast<U32>(r) & mask);
3303 g = sk_bit_cast<F>(sk_bit_cast<U32>(g) & mask);
3304 b = sk_bit_cast<F>(sk_bit_cast<U32>(b) & mask);
3305 a = sk_bit_cast<F>(sk_bit_cast<U32>(a) & mask);
3306}
uint32_t mask[SkRasterPipeline_kMaxStride]

◆ STAGE() [31/188]

SK_OPTS_NS::STAGE ( clamp_01  ,
NoCtx   
)

Definition at line 2374 of file SkRasterPipeline_opts.h.

2374 {
2375 r = clamp_01_(r);
2376 g = clamp_01_(g);
2377 b = clamp_01_(b);
2378 a = clamp_01_(a);
2379}

◆ STAGE() [32/188]

SK_OPTS_NS::STAGE ( clamp_gamut  ,
NoCtx   
)

Definition at line 2381 of file SkRasterPipeline_opts.h.

2381 {
2382 a = min(max(a, 0.0f), 1.0f);
2383 r = min(max(r, 0.0f), a);
2384 g = min(max(g, 0.0f), a);
2385 b = min(max(b, 0.0f), a);
2386}

◆ STAGE() [33/188]

SK_OPTS_NS::STAGE ( clamp_x_1  ,
NoCtx   
)

Definition at line 3264 of file SkRasterPipeline_opts.h.

3264{ r = clamp_01_(r); }

◆ STAGE() [34/188]

SK_OPTS_NS::STAGE ( clamp_x_and_y  ,
const SkRasterPipeline_CoordClampCtx ctx 
)

◆ STAGE() [35/188]

SK_OPTS_NS::STAGE ( color  ,
NoCtx   
)

Definition at line 2320 of file SkRasterPipeline_opts.h.

2320 {
2321 F R = r*da,
2322 G = g*da,
2323 B = b*da;
2324
2325 set_lum(&R, &G, &B, lum(dr,dg,db)*a);
2326 clip_color(&R,&G,&B, a*da);
2327
2328 r = mad(r, inv(da), mad(dr, inv(a), R));
2329 g = mad(g, inv(da), mad(dg, inv(a), G));
2330 b = mad(b, inv(da), mad(db, inv(a), B));
2331 a = a + nmad(a, da, da);
2332}
static void clip_color(float *r, float *g, float *b)
Definition hsl.cpp:68
static void set_lum(float *r, float *g, float *b, float l)
Definition hsl.cpp:83
#define R(r)
Definition SkMD5.cpp:125

◆ STAGE() [36/188]

SK_OPTS_NS::STAGE ( css_hcl_to_lab  ,
NoCtx   
)

Definition at line 2553 of file SkRasterPipeline_opts.h.

2553 {
2554 F H = r,
2555 C = g,
2556 L = b;
2557
2558 F hueRadians = H * (SK_FloatPI / 180);
2559
2560 r = L;
2561 g = C * cos_(hueRadians);
2562 b = C * sin_(hueRadians);
2563}
Definition SkMD5.cpp:130

◆ STAGE() [37/188]

SK_OPTS_NS::STAGE ( css_hsl_to_srgb  ,
NoCtx   
)

Definition at line 2590 of file SkRasterPipeline_opts.h.

2590 {
2591 RGB rgb = css_hsl_to_srgb_(r, g, b);
2592 r = rgb.r;
2593 g = rgb.g;
2594 b = rgb.b;
2595}
SI RGB css_hsl_to_srgb_(F h, F s, F l)

◆ STAGE() [38/188]

SK_OPTS_NS::STAGE ( css_hwb_to_srgb  ,
NoCtx   
)

Definition at line 2597 of file SkRasterPipeline_opts.h.

2597 {
2598 g *= 0.01f;
2599 b *= 0.01f;
2600
2601 F gray = g / (g + b);
2602
2603 RGB rgb = css_hsl_to_srgb_(r, F_(100.0f), F_(50.0f));
2604 rgb.r = rgb.r * (1 - g - b) + g;
2605 rgb.g = rgb.g * (1 - g - b) + g;
2606 rgb.b = rgb.b * (1 - g - b) + g;
2607
2608 auto isGray = (g + b) >= 1;
2609
2610 r = if_then_else(isGray, gray, rgb.r);
2611 g = if_then_else(isGray, gray, rgb.g);
2612 b = if_then_else(isGray, gray, rgb.b);
2613}

◆ STAGE() [39/188]

SK_OPTS_NS::STAGE ( css_lab_to_xyz  ,
NoCtx   
)

Definition at line 2497 of file SkRasterPipeline_opts.h.

2497 {
2498 constexpr float k = 24389 / 27.0f;
2499 constexpr float e = 216 / 24389.0f;
2500
2501 F f[3];
2502 f[1] = (r + 16) * (1 / 116.0f);
2503 f[0] = (g * (1 / 500.0f)) + f[1];
2504 f[2] = f[1] - (b * (1 / 200.0f));
2505
2506 F f_cubed[3] = { f[0]*f[0]*f[0], f[1]*f[1]*f[1], f[2]*f[2]*f[2] };
2507
2508 F xyz[3] = {
2509 if_then_else(f_cubed[0] > e, f_cubed[0], (116 * f[0] - 16) * (1 / k)),
2510 if_then_else(r > k * e, f_cubed[1], r * (1 / k)),
2511 if_then_else(f_cubed[2] > e, f_cubed[2], (116 * f[2] - 16) * (1 / k))
2512 };
2513
2514 constexpr float D50[3] = { 0.3457f / 0.3585f, 1.0f, (1.0f - 0.3457f - 0.3585f) / 0.3585f };
2515 r = xyz[0]*D50[0];
2516 g = xyz[1]*D50[1];
2517 b = xyz[2]*D50[2];
2518}

◆ STAGE() [40/188]

SK_OPTS_NS::STAGE ( css_oklab_gamut_map_to_linear_srgb  ,
NoCtx   
)

Definition at line 2534 of file SkRasterPipeline_opts.h.

2534 {
2535 // TODO(https://crbug.com/1508329): Add support for gamut mapping.
2536 // Return a greyscale value, so that accidental use is obvious.
2537 F l_ = r,
2538 m_ = r,
2539 s_ = r;
2540
2541 F l = l_*l_*l_,
2542 m = m_*m_*m_,
2543 s = s_*s_*s_;
2544
2545 r = +4.0767416621f * l - 3.3077115913f * m + 0.2309699292f * s;
2546 g = -1.2684380046f * l + 2.6097574011f * m - 0.3413193965f * s;
2547 b = -0.0041960863f * l - 0.7034186147f * m + 1.7076147010f * s;
2548}

◆ STAGE() [41/188]

SK_OPTS_NS::STAGE ( css_oklab_to_linear_srgb  ,
NoCtx   
)

Definition at line 2520 of file SkRasterPipeline_opts.h.

2520 {
2521 F l_ = r + 0.3963377774f * g + 0.2158037573f * b,
2522 m_ = r - 0.1055613458f * g - 0.0638541728f * b,
2523 s_ = r - 0.0894841775f * g - 1.2914855480f * b;
2524
2525 F l = l_*l_*l_,
2526 m = m_*m_*m_,
2527 s = s_*s_*s_;
2528
2529 r = +4.0767416621f * l - 3.3077115913f * m + 0.2309699292f * s;
2530 g = -1.2684380046f * l + 2.6097574011f * m - 0.3413193965f * s;
2531 b = -0.0041960863f * l - 0.7034186147f * m + 1.7076147010f * s;
2532}

◆ STAGE() [42/188]

SK_OPTS_NS::STAGE ( decal_x  ,
SkRasterPipeline_DecalTileCtx ctx 
)

Definition at line 3279 of file SkRasterPipeline_opts.h.

3279 {
3280 auto w = ctx->limit_x;
3281 auto e = ctx->inclusiveEdge_x;
3282 auto cond = ((0 < r) & (r < w)) | (r == e);
3283 sk_unaligned_store(ctx->mask, cond_to_mask(cond));
3284}

◆ STAGE() [43/188]

SK_OPTS_NS::STAGE ( decal_x_and_y  ,
SkRasterPipeline_DecalTileCtx ctx 
)

Definition at line 3291 of file SkRasterPipeline_opts.h.

3291 {
3292 auto w = ctx->limit_x;
3293 auto h = ctx->limit_y;
3294 auto ex = ctx->inclusiveEdge_x;
3295 auto ey = ctx->inclusiveEdge_y;
3296 auto cond = (((0 < r) & (r < w)) | (r == ex))
3297 & (((0 < g) & (g < h)) | (g == ey));
3298 sk_unaligned_store(ctx->mask, cond_to_mask(cond));
3299}

◆ STAGE() [44/188]

SK_OPTS_NS::STAGE ( decal_y  ,
SkRasterPipeline_DecalTileCtx ctx 
)

Definition at line 3285 of file SkRasterPipeline_opts.h.

3285 {
3286 auto h = ctx->limit_y;
3287 auto e = ctx->inclusiveEdge_y;
3288 auto cond = ((0 < g) & (g < h)) | (g == e);
3289 sk_unaligned_store(ctx->mask, cond_to_mask(cond));
3290}

◆ STAGE() [45/188]

SK_OPTS_NS::STAGE ( dither  ,
const float *  rate 
)

Definition at line 2049 of file SkRasterPipeline_opts.h.

2049 {
2050 // Get [(dx,dy), (dx+1,dy), (dx+2,dy), ...] loaded up in integer vectors.
2051 uint32_t iota[] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
2052 static_assert(std::size(iota) >= SkRasterPipeline_kMaxStride_highp);
2053
2054 U32 X = U32_(dx) + sk_unaligned_load<U32>(iota),
2055 Y = U32_(dy);
2056
2057 // We're doing 8x8 ordered dithering, see https://en.wikipedia.org/wiki/Ordered_dithering.
2058 // In this case n=8 and we're using the matrix that looks like 1/64 x [ 0 48 12 60 ... ].
2059
2060 // We only need X and X^Y from here on, so it's easier to just think of that as "Y".
2061 Y ^= X;
2062
2063 // We'll mix the bottom 3 bits of each of X and Y to make 6 bits,
2064 // for 2^6 == 64 == 8x8 matrix values. If X=abc and Y=def, we make fcebda.
2065 U32 M = (Y & 1) << 5 | (X & 1) << 4
2066 | (Y & 2) << 2 | (X & 2) << 1
2067 | (Y & 4) >> 1 | (X & 4) >> 2;
2068
2069 // Scale that dither to [0,1), then (-0.5,+0.5), here using 63/128 = 0.4921875 as 0.5-epsilon.
2070 // We want to make sure our dither is less than 0.5 in either direction to keep exact values
2071 // like 0 and 1 unchanged after rounding.
2072 F dither = cast(M) * (2/128.0f) - (63/128.0f);
2073
2074 r += *rate*dither;
2075 g += *rate*dither;
2076 b += *rate*dither;
2077
2078 r = max(0.0f, min(r, a));
2079 g = max(0.0f, min(g, a));
2080 b = max(0.0f, min(b, a));
2081}
static constexpr int SkRasterPipeline_kMaxStride_highp
#define M(st)
static const SkScalar Y
static const SkScalar X

◆ STAGE() [46/188]

SK_OPTS_NS::STAGE ( emboss  ,
const SkRasterPipeline_EmbossCtx ctx 
)

Definition at line 2701 of file SkRasterPipeline_opts.h.

2701 {
2702 auto mptr = ptr_at_xy<const uint8_t>(&ctx->mul, dx,dy),
2703 aptr = ptr_at_xy<const uint8_t>(&ctx->add, dx,dy);
2704
2705 F mul = from_byte(load<U8>(mptr)),
2706 add = from_byte(load<U8>(aptr));
2707
2708 r = mad(r, mul, add);
2709 g = mad(g, mul, add);
2710 b = mad(b, mul, add);
2711}

◆ STAGE() [47/188]

SK_OPTS_NS::STAGE ( evenly_spaced_2_stop_gradient  ,
const SkRasterPipeline_EvenlySpaced2StopGradientCtx c 
)

Definition at line 3464 of file SkRasterPipeline_opts.h.

3464 {
3465 auto t = r;
3466 r = mad(t, c->f[0], c->b[0]);
3467 g = mad(t, c->f[1], c->b[1]);
3468 b = mad(t, c->f[2], c->b[2]);
3469 a = mad(t, c->f[3], c->b[3]);
3470}

◆ STAGE() [48/188]

SK_OPTS_NS::STAGE ( evenly_spaced_gradient  ,
const SkRasterPipeline_GradientCtx c 
)

Definition at line 3446 of file SkRasterPipeline_opts.h.

3446 {
3447 auto t = r;
3448 auto idx = trunc_(t * static_cast<float>(c->stopCount-1));
3449 gradient_lookup(c, idx, t, &r, &g, &b, &a);
3450}
SI void gradient_lookup(const SkRasterPipeline_GradientCtx *c, U32 idx, F t, F *r, F *g, F *b, F *a)

◆ STAGE() [49/188]

SK_OPTS_NS::STAGE ( force_opaque  ,
NoCtx   
)

Definition at line 2454 of file SkRasterPipeline_opts.h.

2454{ a = F1; }

◆ STAGE() [50/188]

SK_OPTS_NS::STAGE ( force_opaque_dst  ,
NoCtx   
)

Definition at line 2455 of file SkRasterPipeline_opts.h.

2455{ da = F1; }

◆ STAGE() [51/188]

SK_OPTS_NS::STAGE ( gamma_  ,
const float *  G 
)

Definition at line 2744 of file SkRasterPipeline_opts.h.

2744 {
2745 auto fn = [&](F v) {
2746 U32 sign;
2747 v = strip_sign(v, &sign);
2748 return apply_sign(approx_powf(v, *G), sign);
2749 };
2750 r = fn(r);
2751 g = fn(g);
2752 b = fn(b);
2753}
SI F apply_sign(F x, U32 sign)
SI F strip_sign(F x, U32 *sign)

◆ STAGE() [52/188]

SK_OPTS_NS::STAGE ( gather_10101010_xr  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 3063 of file SkRasterPipeline_opts.h.

3063 {
3064 const uint64_t* ptr;
3065 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
3066 from_10101010_xr(gather(ptr, ix), &r, &g, &b, &a);
3067}
SI void from_10101010_xr(U64 _10x6, F *r, F *g, F *b, F *a)

◆ STAGE() [53/188]

SK_OPTS_NS::STAGE ( gather_1010102  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 3053 of file SkRasterPipeline_opts.h.

3053 {
3054 const uint32_t* ptr;
3055 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
3056 from_1010102(gather(ptr, ix), &r,&g,&b,&a);
3057}
SI void from_1010102(U32 rgba, F *r, F *g, F *b, F *a)

◆ STAGE() [54/188]

SK_OPTS_NS::STAGE ( gather_1010102_xr  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 3058 of file SkRasterPipeline_opts.h.

3058 {
3059 const uint32_t* ptr;
3060 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
3061 from_1010102_xr(gather(ptr, ix), &r,&g,&b,&a);
3062}
SI void from_1010102_xr(U32 rgba, F *r, F *g, F *b, F *a)

◆ STAGE() [55/188]

SK_OPTS_NS::STAGE ( gather_10x6  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 3020 of file SkRasterPipeline_opts.h.

3020 {
3021 const uint64_t* ptr;
3022 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
3023 from_10x6(gather(ptr, ix), &r, &g, &b, &a);
3024}
SI void from_10x6(U64 _10x6, F *r, F *g, F *b, F *a)

◆ STAGE() [56/188]

SK_OPTS_NS::STAGE ( gather_16161616  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 2996 of file SkRasterPipeline_opts.h.

2996 {
2997 const uint64_t* ptr;
2998 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2999 from_16161616(gather(ptr, ix), &r, &g, &b, &a);
3000}
SI void from_16161616(U64 _16161616, F *r, F *g, F *b, F *a)

◆ STAGE() [57/188]

SK_OPTS_NS::STAGE ( gather_4444  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 2876 of file SkRasterPipeline_opts.h.

2876 {
2877 const uint16_t* ptr;
2878 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2879 from_4444(gather(ptr, ix), &r,&g,&b,&a);
2880}
SI void from_4444(U16 _4444, F *r, F *g, F *b, F *a)

◆ STAGE() [58/188]

SK_OPTS_NS::STAGE ( gather_565  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 2853 of file SkRasterPipeline_opts.h.

2853 {
2854 const uint16_t* ptr;
2855 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2856 from_565(gather(ptr, ix), &r,&g,&b);
2857 a = F1;
2858}
SI void from_565(U16 _565, F *r, F *g, F *b)

◆ STAGE() [59/188]

SK_OPTS_NS::STAGE ( gather_8888  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 2898 of file SkRasterPipeline_opts.h.

2898 {
2899 const uint32_t* ptr;
2900 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2901 from_8888(gather(ptr, ix), &r,&g,&b,&a);
2902}

◆ STAGE() [60/188]

SK_OPTS_NS::STAGE ( gather_a16  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 2948 of file SkRasterPipeline_opts.h.

2948 {
2949 const uint16_t* ptr;
2950 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2951 r = g = b = F0;
2952 a = from_short(gather(ptr, ix));
2953}

◆ STAGE() [61/188]

SK_OPTS_NS::STAGE ( gather_a8  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 2822 of file SkRasterPipeline_opts.h.

2822 {
2823 const uint8_t* ptr;
2824 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2825 r = g = b = F0;
2826 a = from_byte(gather(ptr, ix));
2827}

◆ STAGE() [62/188]

SK_OPTS_NS::STAGE ( gather_af16  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 3166 of file SkRasterPipeline_opts.h.

3166 {
3167 const uint16_t* ptr;
3168 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
3169 r = g = b = F0;
3170 a = from_half(gather(ptr, ix));
3171}

◆ STAGE() [63/188]

SK_OPTS_NS::STAGE ( gather_f16  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 3130 of file SkRasterPipeline_opts.h.

3130 {
3131 const uint64_t* ptr;
3132 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
3133 auto px = gather(ptr, ix);
3134
3135 U16 R,G,B,A;
3136 load4((const uint16_t*)&px, &R,&G,&B,&A);
3137 r = from_half(R);
3138 g = from_half(G);
3139 b = from_half(B);
3140 a = from_half(A);
3141}

◆ STAGE() [64/188]

SK_OPTS_NS::STAGE ( gather_f32  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 3223 of file SkRasterPipeline_opts.h.

3223 {
3224 const float* ptr;
3225 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
3226 r = gather(ptr, 4*ix + 0);
3227 g = gather(ptr, 4*ix + 1);
3228 b = gather(ptr, 4*ix + 2);
3229 a = gather(ptr, 4*ix + 3);
3230}

◆ STAGE() [65/188]

SK_OPTS_NS::STAGE ( gather_rg1616  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 2973 of file SkRasterPipeline_opts.h.

2973 {
2974 const uint32_t* ptr;
2975 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2976 from_1616(gather(ptr, ix), &r, &g);
2977 b = F0;
2978 a = F1;
2979}
SI void from_1616(U32 _1616, F *r, F *g)

◆ STAGE() [66/188]

SK_OPTS_NS::STAGE ( gather_rg88  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 2925 of file SkRasterPipeline_opts.h.

2925 {
2926 const uint16_t* ptr;
2927 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2928 from_88(gather(ptr, ix), &r, &g);
2929 b = F0;
2930 a = F1;
2931}
SI void from_88(U16 _88, F *r, F *g)

◆ STAGE() [67/188]

SK_OPTS_NS::STAGE ( gather_rgf16  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 3197 of file SkRasterPipeline_opts.h.

3197 {
3198 const uint32_t* ptr;
3199 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
3200 auto px = gather(ptr, ix);
3201
3202 U16 R,G;
3203 load2((const uint16_t*)&px, &R, &G);
3204 r = from_half(R);
3205 g = from_half(G);
3206 b = F0;
3207 a = F1;
3208}
SI void load2(const uint16_t *ptr, U16 *r, U16 *g)

◆ STAGE() [68/188]

SK_OPTS_NS::STAGE ( gauss_a_to_rgba  ,
NoCtx   
)

Definition at line 4936 of file SkRasterPipeline_opts.h.

4936 {
4937 // x = 1 - x;
4938 // exp(-x * x * 4) - 0.018f;
4939 // ... now approximate with quartic
4940 //
4941 const float c4 = -2.26661229133605957031f;
4942 const float c3 = 2.89795351028442382812f;
4943 const float c2 = 0.21345567703247070312f;
4944 const float c1 = 0.15489584207534790039f;
4945 const float c0 = 0.00030726194381713867f;
4946 a = mad(a, mad(a, mad(a, mad(a, c4, c3), c2), c1), c0);
4947 r = a;
4948 g = a;
4949 b = a;
4950}

◆ STAGE() [69/188]

SK_OPTS_NS::STAGE ( gradient  ,
const SkRasterPipeline_GradientCtx c 
)

Definition at line 3452 of file SkRasterPipeline_opts.h.

3452 {
3453 auto t = r;
3454 U32 idx = U32_(0);
3455
3456 // N.B. The loop starts at 1 because idx 0 is the color to use before the first stop.
3457 for (size_t i = 1; i < c->stopCount; i++) {
3458 idx += (U32)if_then_else(t >= c->ts[i], I32_(1), I32_(0));
3459 }
3460
3461 gradient_lookup(c, idx, t, &r, &g, &b, &a);
3462}
SI constexpr I32 I32_(int32_t x)
SI constexpr U32 U32_(uint32_t x)

◆ STAGE() [70/188]

SK_OPTS_NS::STAGE ( HLGinvish  ,
const skcms_TransferFunction ctx 
)

Definition at line 2790 of file SkRasterPipeline_opts.h.

2790 {
2791 auto fn = [&](F v) {
2792 U32 sign;
2793 v = strip_sign(v, &sign);
2794
2795 const float R = ctx->a, G = ctx->b,
2796 a = ctx->c, b = ctx->d, c = ctx->e,
2797 K = ctx->f + 1.0f;
2798
2799 v /= K;
2800 F r = if_then_else(v <= 1, R * approx_powf(v, G)
2801 , a * approx_log(v - b) + c);
2802
2803 return apply_sign(r, sign);
2804 };
2805 r = fn(r);
2806 g = fn(g);
2807 b = fn(b);
2808}
SI F approx_log(F x)
static const int K
Definition daa.cpp:21

◆ STAGE() [71/188]

SK_OPTS_NS::STAGE ( HLGish  ,
const skcms_TransferFunction ctx 
)

Definition at line 2771 of file SkRasterPipeline_opts.h.

2771 {
2772 auto fn = [&](F v) {
2773 U32 sign;
2774 v = strip_sign(v, &sign);
2775
2776 const float R = ctx->a, G = ctx->b,
2777 a = ctx->c, b = ctx->d, c = ctx->e,
2778 K = ctx->f + 1.0f;
2779
2780 F r = if_then_else(v*R <= 1, approx_powf(v*R, G)
2781 , approx_exp((v-c)*a) + b);
2782
2783 return K * apply_sign(r, sign);
2784 };
2785 r = fn(r);
2786 g = fn(g);
2787 b = fn(b);
2788}
SI F approx_exp(F x)

◆ STAGE() [72/188]

SK_OPTS_NS::STAGE ( hsl_to_rgb  ,
NoCtx   
)

Definition at line 2477 of file SkRasterPipeline_opts.h.

2477 {
2478 // See GrRGBToHSLFilterEffect.fp
2479
2480 F h = r,
2481 s = g,
2482 l = b,
2483 c = (1.0f - abs_(2.0f * l - 1)) * s;
2484
2485 auto hue_to_rgb = [&](F hue) {
2486 F q = clamp_01_(abs_(fract(hue) * 6.0f - 3.0f) - 1.0f);
2487 return (q - 0.5f) * c + l;
2488 };
2489
2490 r = hue_to_rgb(h + 0.0f/3.0f);
2491 g = hue_to_rgb(h + 2.0f/3.0f);
2492 b = hue_to_rgb(h + 1.0f/3.0f);
2493}
static void hue(float dr, float dg, float db, float *sr, float *sg, float *sb)
Definition hsl.cpp:92

◆ STAGE() [73/188]

SK_OPTS_NS::STAGE ( hue  ,
NoCtx   
)

Definition at line 2292 of file SkRasterPipeline_opts.h.

2292 {
2293 F R = r*a,
2294 G = g*a,
2295 B = b*a;
2296
2297 set_sat(&R, &G, &B, sat(dr,dg,db)*a);
2298 set_lum(&R, &G, &B, lum(dr,dg,db)*a);
2299 clip_color(&R,&G,&B, a*da);
2300
2301 r = mad(r, inv(da), mad(dr, inv(a), R));
2302 g = mad(g, inv(da), mad(dg, inv(a), G));
2303 b = mad(b, inv(da), mad(db, inv(a), B));
2304 a = a + nmad(a, da, da);
2305}
static void set_sat(float *r, float *g, float *b, float s)
Definition hsl.cpp:57

◆ STAGE() [74/188]

SK_OPTS_NS::STAGE ( lerp_1_float  ,
const float *  c 
)

Definition at line 2656 of file SkRasterPipeline_opts.h.

2656 {
2657 r = lerp(dr, r, F_(*c));
2658 g = lerp(dg, g, F_(*c));
2659 b = lerp(db, b, F_(*c));
2660 a = lerp(da, a, F_(*c));
2661}

◆ STAGE() [75/188]

SK_OPTS_NS::STAGE ( lerp_565  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2687 of file SkRasterPipeline_opts.h.

2687 {
2688 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2689
2690 F cr,cg,cb;
2691 from_565(load<U16>(ptr), &cr, &cg, &cb);
2692
2693 F ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
2694
2695 r = lerp(dr, r, cr);
2696 g = lerp(dg, g, cg);
2697 b = lerp(db, b, cb);
2698 a = lerp(da, a, ca);
2699}
SI F alpha_coverage_from_rgb_coverage(F a, F da, F cr, F cg, F cb)

◆ STAGE() [76/188]

SK_OPTS_NS::STAGE ( lerp_native  ,
const float  scales[] 
)

Definition at line 2669 of file SkRasterPipeline_opts.h.

2669 {
2670 auto c = sk_unaligned_load<F>(scales);
2671 r = lerp(dr, r, c);
2672 g = lerp(dg, g, c);
2673 b = lerp(db, b, c);
2674 a = lerp(da, a, c);
2675}

◆ STAGE() [77/188]

SK_OPTS_NS::STAGE ( lerp_u8  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2676 of file SkRasterPipeline_opts.h.

2676 {
2677 auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
2678
2679 auto scales = load<U8>(ptr);
2680 auto c = from_byte(scales);
2681
2682 r = lerp(dr, r, c);
2683 g = lerp(dg, g, c);
2684 b = lerp(db, b, c);
2685 a = lerp(da, a, c);
2686}

◆ STAGE() [78/188]

SK_OPTS_NS::STAGE ( load_10101010_xr  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3068 of file SkRasterPipeline_opts.h.

3068 {
3069 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx, dy);
3070 from_10101010_xr(load<U64>(ptr), &r,&g, &b, &a);
3071}

◆ STAGE() [79/188]

SK_OPTS_NS::STAGE ( load_10101010_xr_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3072 of file SkRasterPipeline_opts.h.

3072 {
3073 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx, dy);
3074 from_10101010_xr(load<U64>(ptr), &dr, &dg, &db, &da);
3075}

◆ STAGE() [80/188]

SK_OPTS_NS::STAGE ( load_1010102  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3037 of file SkRasterPipeline_opts.h.

3037 {
3038 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
3039 from_1010102(load<U32>(ptr), &r,&g,&b,&a);
3040}

◆ STAGE() [81/188]

SK_OPTS_NS::STAGE ( load_1010102_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3041 of file SkRasterPipeline_opts.h.

3041 {
3042 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
3043 from_1010102(load<U32>(ptr), &dr,&dg,&db,&da);
3044}

◆ STAGE() [82/188]

SK_OPTS_NS::STAGE ( load_1010102_xr  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3045 of file SkRasterPipeline_opts.h.

3045 {
3046 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
3047 from_1010102_xr(load<U32>(ptr), &r,&g,&b,&a);
3048}

◆ STAGE() [83/188]

SK_OPTS_NS::STAGE ( load_1010102_xr_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3049 of file SkRasterPipeline_opts.h.

3049 {
3050 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
3051 from_1010102_xr(load<U32>(ptr), &dr,&dg,&db,&da);
3052}

◆ STAGE() [84/188]

SK_OPTS_NS::STAGE ( load_10x6  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3012 of file SkRasterPipeline_opts.h.

3012 {
3013 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx, dy);
3014 from_10x6(load<U64>(ptr), &r,&g, &b, &a);
3015}

◆ STAGE() [85/188]

SK_OPTS_NS::STAGE ( load_10x6_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3016 of file SkRasterPipeline_opts.h.

3016 {
3017 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx, dy);
3018 from_10x6(load<U64>(ptr), &dr, &dg, &db, &da);
3019}

◆ STAGE() [86/188]

SK_OPTS_NS::STAGE ( load_16161616  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2988 of file SkRasterPipeline_opts.h.

2988 {
2989 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx, dy);
2990 from_16161616(load<U64>(ptr), &r,&g, &b, &a);
2991}

◆ STAGE() [87/188]

SK_OPTS_NS::STAGE ( load_16161616_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2992 of file SkRasterPipeline_opts.h.

2992 {
2993 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx, dy);
2994 from_16161616(load<U64>(ptr), &dr, &dg, &db, &da);
2995}

◆ STAGE() [88/188]

SK_OPTS_NS::STAGE ( load_4444  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2868 of file SkRasterPipeline_opts.h.

2868 {
2869 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2870 from_4444(load<U16>(ptr), &r,&g,&b,&a);
2871}

◆ STAGE() [89/188]

SK_OPTS_NS::STAGE ( load_4444_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2872 of file SkRasterPipeline_opts.h.

2872 {
2873 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2874 from_4444(load<U16>(ptr), &dr,&dg,&db,&da);
2875}

◆ STAGE() [90/188]

SK_OPTS_NS::STAGE ( load_565  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2841 of file SkRasterPipeline_opts.h.

2841 {
2842 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2843
2844 from_565(load<U16>(ptr), &r,&g,&b);
2845 a = F1;
2846}

◆ STAGE() [91/188]

SK_OPTS_NS::STAGE ( load_565_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2847 of file SkRasterPipeline_opts.h.

2847 {
2848 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2849
2850 from_565(load<U16>(ptr), &dr,&dg,&db);
2851 da = F1;
2852}

◆ STAGE() [92/188]

SK_OPTS_NS::STAGE ( load_8888  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2890 of file SkRasterPipeline_opts.h.

2890 {
2891 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
2892 from_8888(load<U32>(ptr), &r,&g,&b,&a);
2893}

◆ STAGE() [93/188]

SK_OPTS_NS::STAGE ( load_8888_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2894 of file SkRasterPipeline_opts.h.

2894 {
2895 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
2896 from_8888(load<U32>(ptr), &dr,&dg,&db,&da);
2897}

◆ STAGE() [94/188]

SK_OPTS_NS::STAGE ( load_a16  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2938 of file SkRasterPipeline_opts.h.

2938 {
2939 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2940 r = g = b = F0;
2941 a = from_short(load<U16>(ptr));
2942}

◆ STAGE() [95/188]

SK_OPTS_NS::STAGE ( load_a16_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2943 of file SkRasterPipeline_opts.h.

2943 {
2944 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
2945 dr = dg = db = F0;
2946 da = from_short(load<U16>(ptr));
2947}

◆ STAGE() [96/188]

SK_OPTS_NS::STAGE ( load_a8  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2810 of file SkRasterPipeline_opts.h.

2810 {
2811 auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
2812
2813 r = g = b = F0;
2814 a = from_byte(load<U8>(ptr));
2815}

◆ STAGE() [97/188]

SK_OPTS_NS::STAGE ( load_a8_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2816 of file SkRasterPipeline_opts.h.

2816 {
2817 auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
2818
2819 dr = dg = db = F0;
2820 da = from_byte(load<U8>(ptr));
2821}

◆ STAGE() [98/188]

SK_OPTS_NS::STAGE ( load_af16  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3150 of file SkRasterPipeline_opts.h.

3150 {
3151 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
3152
3153 U16 A = load<U16>((const uint16_t*)ptr);
3154 r = F0;
3155 g = F0;
3156 b = F0;
3157 a = from_half(A);
3158}

◆ STAGE() [99/188]

SK_OPTS_NS::STAGE ( load_af16_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3159 of file SkRasterPipeline_opts.h.

3159 {
3160 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
3161
3162 U16 A = load<U16>((const uint16_t*)ptr);
3163 dr = dg = db = F0;
3164 da = from_half(A);
3165}

◆ STAGE() [100/188]

SK_OPTS_NS::STAGE ( load_dst  ,
const float *  ptr 
)

Definition at line 2145 of file SkRasterPipeline_opts.h.

2145 {
2146 dr = sk_unaligned_load<F>(ptr + 0*N);
2147 dg = sk_unaligned_load<F>(ptr + 1*N);
2148 db = sk_unaligned_load<F>(ptr + 2*N);
2149 da = sk_unaligned_load<F>(ptr + 3*N);
2150}

◆ STAGE() [101/188]

SK_OPTS_NS::STAGE ( load_f16  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3110 of file SkRasterPipeline_opts.h.

3110 {
3111 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx,dy);
3112
3113 U16 R,G,B,A;
3114 load4((const uint16_t*)ptr, &R,&G,&B,&A);
3115 r = from_half(R);
3116 g = from_half(G);
3117 b = from_half(B);
3118 a = from_half(A);
3119}

◆ STAGE() [102/188]

SK_OPTS_NS::STAGE ( load_f16_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3120 of file SkRasterPipeline_opts.h.

3120 {
3121 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx,dy);
3122
3123 U16 R,G,B,A;
3124 load4((const uint16_t*)ptr, &R,&G,&B,&A);
3125 dr = from_half(R);
3126 dg = from_half(G);
3127 db = from_half(B);
3128 da = from_half(A);
3129}

◆ STAGE() [103/188]

SK_OPTS_NS::STAGE ( load_f32  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3215 of file SkRasterPipeline_opts.h.

3215 {
3216 auto ptr = ptr_at_xy<const float>(ctx, 4*dx,4*dy);
3217 load4(ptr, &r,&g,&b,&a);
3218}

◆ STAGE() [104/188]

SK_OPTS_NS::STAGE ( load_f32_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3219 of file SkRasterPipeline_opts.h.

3219 {
3220 auto ptr = ptr_at_xy<const float>(ctx, 4*dx,4*dy);
3221 load4(ptr, &dr,&dg,&db,&da);
3222}

◆ STAGE() [105/188]

SK_OPTS_NS::STAGE ( load_rg1616  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2961 of file SkRasterPipeline_opts.h.

2961 {
2962 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
2963 b = F0;
2964 a = F1;
2965 from_1616(load<U32>(ptr), &r,&g);
2966}

◆ STAGE() [106/188]

SK_OPTS_NS::STAGE ( load_rg1616_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2967 of file SkRasterPipeline_opts.h.

2967 {
2968 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
2969 from_1616(load<U32>(ptr), &dr, &dg);
2970 db = F0;
2971 da = F1;
2972}

◆ STAGE() [107/188]

SK_OPTS_NS::STAGE ( load_rg88  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2913 of file SkRasterPipeline_opts.h.

2913 {
2914 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
2915 from_88(load<U16>(ptr), &r, &g);
2916 b = F0;
2917 a = F1;
2918}

◆ STAGE() [108/188]

SK_OPTS_NS::STAGE ( load_rg88_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2919 of file SkRasterPipeline_opts.h.

2919 {
2920 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
2921 from_88(load<U16>(ptr), &dr, &dg);
2922 db = F0;
2923 da = F1;
2924}

◆ STAGE() [109/188]

SK_OPTS_NS::STAGE ( load_rgf16  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3177 of file SkRasterPipeline_opts.h.

3177 {
3178 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
3179
3180 U16 R,G;
3181 load2((const uint16_t*)ptr, &R, &G);
3182 r = from_half(R);
3183 g = from_half(G);
3184 b = F0;
3185 a = F1;
3186}

◆ STAGE() [110/188]

SK_OPTS_NS::STAGE ( load_rgf16_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3187 of file SkRasterPipeline_opts.h.

3187 {
3188 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
3189
3190 U16 R,G;
3191 load2((const uint16_t*)ptr, &R, &G);
3192 dr = from_half(R);
3193 dg = from_half(G);
3194 db = F0;
3195 da = F1;
3196}

◆ STAGE() [111/188]

SK_OPTS_NS::STAGE ( load_src  ,
const float *  ptr 
)

Definition at line 2115 of file SkRasterPipeline_opts.h.

2115 {
2116 r = sk_unaligned_load<F>(ptr + 0*N);
2117 g = sk_unaligned_load<F>(ptr + 1*N);
2118 b = sk_unaligned_load<F>(ptr + 2*N);
2119 a = sk_unaligned_load<F>(ptr + 3*N);
2120}

◆ STAGE() [112/188]

SK_OPTS_NS::STAGE ( load_src_rg  ,
float *  ptr 
)

Definition at line 2135 of file SkRasterPipeline_opts.h.

2135 {
2136 r = sk_unaligned_load<F>(ptr + 0*N);
2137 g = sk_unaligned_load<F>(ptr + 1*N);
2138}

◆ STAGE() [113/188]

SK_OPTS_NS::STAGE ( luminosity  ,
NoCtx   
)

Definition at line 2333 of file SkRasterPipeline_opts.h.

2333 {
2334 F R = dr*a,
2335 G = dg*a,
2336 B = db*a;
2337
2338 set_lum(&R, &G, &B, lum(r,g,b)*da);
2339 clip_color(&R,&G,&B, a*da);
2340
2341 r = mad(r, inv(da), mad(dr, inv(a), R));
2342 g = mad(g, inv(da), mad(dg, inv(a), G));
2343 b = mad(b, inv(da), mad(db, inv(a), B));
2344 a = a + nmad(a, da, da);
2345}

◆ STAGE() [114/188]

SK_OPTS_NS::STAGE ( mask_2pt_conical_degenerates  ,
SkRasterPipeline_2PtConicalCtx c 
)

Definition at line 3550 of file SkRasterPipeline_opts.h.

3550 {
3551 F& t = r;
3552 auto is_degenerate = (t <= 0) | (t != t);
3553 t = if_then_else(is_degenerate, F0, t);
3554 sk_unaligned_store(&c->fMask, cond_to_mask(!is_degenerate));
3555}
static bool is_degenerate(const SkPath &path)
Definition SkPath.cpp:71
uint32_t fMask[SkRasterPipeline_kMaxStride_highp]

◆ STAGE() [115/188]

SK_OPTS_NS::STAGE ( mask_2pt_conical_nan  ,
SkRasterPipeline_2PtConicalCtx c 
)

Definition at line 3543 of file SkRasterPipeline_opts.h.

3543 {
3544 F& t = r;
3545 auto is_degenerate = (t != t); // NaN
3546 t = if_then_else(is_degenerate, F0, t);
3547 sk_unaligned_store(&c->fMask, cond_to_mask(!is_degenerate));
3548}

◆ STAGE() [116/188]

SK_OPTS_NS::STAGE ( matrix_2x3  ,
const float *  m 
)

Definition at line 3341 of file SkRasterPipeline_opts.h.

3341 {
3342 auto R = mad(r,m[0], mad(g,m[1], m[2])),
3343 G = mad(r,m[3], mad(g,m[4], m[5]));
3344 r = R;
3345 g = G;
3346}

◆ STAGE() [117/188]

SK_OPTS_NS::STAGE ( matrix_3x3  ,
const float *  m 
)

Definition at line 3347 of file SkRasterPipeline_opts.h.

3347 {
3348 auto R = mad(r,m[0], mad(g,m[3], b*m[6])),
3349 G = mad(r,m[1], mad(g,m[4], b*m[7])),
3350 B = mad(r,m[2], mad(g,m[5], b*m[8]));
3351 r = R;
3352 g = G;
3353 b = B;
3354}

◆ STAGE() [118/188]

SK_OPTS_NS::STAGE ( matrix_3x4  ,
const float *  m 
)

Definition at line 3355 of file SkRasterPipeline_opts.h.

3355 {
3356 auto R = mad(r,m[0], mad(g,m[3], mad(b,m[6], m[ 9]))),
3357 G = mad(r,m[1], mad(g,m[4], mad(b,m[7], m[10]))),
3358 B = mad(r,m[2], mad(g,m[5], mad(b,m[8], m[11])));
3359 r = R;
3360 g = G;
3361 b = B;
3362}

◆ STAGE() [119/188]

SK_OPTS_NS::STAGE ( matrix_4x3  ,
const float *  m 
)

Definition at line 3373 of file SkRasterPipeline_opts.h.

3373 {
3374 auto X = r,
3375 Y = g;
3376
3377 r = mad(X, m[0], mad(Y, m[4], m[ 8]));
3378 g = mad(X, m[1], mad(Y, m[5], m[ 9]));
3379 b = mad(X, m[2], mad(Y, m[6], m[10]));
3380 a = mad(X, m[3], mad(Y, m[7], m[11]));
3381}

◆ STAGE() [120/188]

SK_OPTS_NS::STAGE ( matrix_4x5  ,
const float *  m 
)

Definition at line 3363 of file SkRasterPipeline_opts.h.

3363 {
3364 auto R = mad(r,m[ 0], mad(g,m[ 1], mad(b,m[ 2], mad(a,m[ 3], m[ 4])))),
3365 G = mad(r,m[ 5], mad(g,m[ 6], mad(b,m[ 7], mad(a,m[ 8], m[ 9])))),
3366 B = mad(r,m[10], mad(g,m[11], mad(b,m[12], mad(a,m[13], m[14])))),
3367 A = mad(r,m[15], mad(g,m[16], mad(b,m[17], mad(a,m[18], m[19]))));
3368 r = R;
3369 g = G;
3370 b = B;
3371 a = A;
3372}

◆ STAGE() [121/188]

SK_OPTS_NS::STAGE ( matrix_perspective  ,
const float *  m 
)

Definition at line 3382 of file SkRasterPipeline_opts.h.

3382 {
3383 // N.B. Unlike the other matrix_ stages, this matrix is row-major.
3384 auto R = mad(r,m[0], mad(g,m[1], m[2])),
3385 G = mad(r,m[3], mad(g,m[4], m[5])),
3386 Z = mad(r,m[6], mad(g,m[7], m[8]));
3387 r = R * rcp_precise(Z);
3388 g = G * rcp_precise(Z);
3389}
#define Z

◆ STAGE() [122/188]

SK_OPTS_NS::STAGE ( matrix_scale_translate  ,
const float *  m 
)

Definition at line 3337 of file SkRasterPipeline_opts.h.

3337 {
3338 r = mad(r,m[0], m[2]);
3339 g = mad(g,m[1], m[3]);
3340}

◆ STAGE() [123/188]

SK_OPTS_NS::STAGE ( matrix_translate  ,
const float *  m 
)

Definition at line 3333 of file SkRasterPipeline_opts.h.

3333 {
3334 r += m[0];
3335 g += m[1];
3336}

◆ STAGE() [124/188]

SK_OPTS_NS::STAGE ( mipmap_linear_finish  ,
SkRasterPipeline_MipmapCtx ctx 
)

Definition at line 3825 of file SkRasterPipeline_opts.h.

3825 {
3826 r = lerp(sk_unaligned_load<F>(ctx->r), r, F_(ctx->lowerWeight));
3827 g = lerp(sk_unaligned_load<F>(ctx->g), g, F_(ctx->lowerWeight));
3828 b = lerp(sk_unaligned_load<F>(ctx->b), b, F_(ctx->lowerWeight));
3829 a = lerp(sk_unaligned_load<F>(ctx->a), a, F_(ctx->lowerWeight));
3830}
float g[SkRasterPipeline_kMaxStride_highp]
float r[SkRasterPipeline_kMaxStride_highp]
float b[SkRasterPipeline_kMaxStride_highp]
float a[SkRasterPipeline_kMaxStride_highp]

◆ STAGE() [125/188]

SK_OPTS_NS::STAGE ( mipmap_linear_init  ,
SkRasterPipeline_MipmapCtx ctx 
)

Definition at line 3810 of file SkRasterPipeline_opts.h.

3810 {
3811 sk_unaligned_store(ctx->x, r);
3812 sk_unaligned_store(ctx->y, g);
3813}
float x[SkRasterPipeline_kMaxStride_highp]
float y[SkRasterPipeline_kMaxStride_highp]

◆ STAGE() [126/188]

SK_OPTS_NS::STAGE ( mipmap_linear_update  ,
SkRasterPipeline_MipmapCtx ctx 
)

Definition at line 3815 of file SkRasterPipeline_opts.h.

3815 {
3816 sk_unaligned_store(ctx->r, r);
3817 sk_unaligned_store(ctx->g, g);
3818 sk_unaligned_store(ctx->b, b);
3819 sk_unaligned_store(ctx->a, a);
3820
3821 r = sk_unaligned_load<F>(ctx->x) * ctx->scaleX;
3822 g = sk_unaligned_load<F>(ctx->y) * ctx->scaleY;
3823}

◆ STAGE() [127/188]

SK_OPTS_NS::STAGE ( mirror_x  ,
const SkRasterPipeline_TileCtx ctx 
)

Definition at line 3261 of file SkRasterPipeline_opts.h.

3261{ r = exclusive_mirror(r, ctx); }
SI F exclusive_mirror(F v, const SkRasterPipeline_TileCtx *ctx)

◆ STAGE() [128/188]

SK_OPTS_NS::STAGE ( mirror_x_1  ,
NoCtx   
)

Definition at line 3266 of file SkRasterPipeline_opts.h.

3266{ r = clamp_01_(abs_( (r-1.0f) - two(floor_((r-1.0f)*0.5f)) - 1.0f )); }

◆ STAGE() [129/188]

SK_OPTS_NS::STAGE ( mirror_y  ,
const SkRasterPipeline_TileCtx ctx 
)

Definition at line 3262 of file SkRasterPipeline_opts.h.

3262{ g = exclusive_mirror(g, ctx); }

◆ STAGE() [130/188]

SK_OPTS_NS::STAGE ( move_dst_src  ,
NoCtx   
)

Definition at line 2417 of file SkRasterPipeline_opts.h.

2417 {
2418 r = dr;
2419 g = dg;
2420 b = db;
2421 a = da;
2422}

◆ STAGE() [131/188]

SK_OPTS_NS::STAGE ( move_src_dst  ,
NoCtx   
)

Definition at line 2411 of file SkRasterPipeline_opts.h.

2411 {
2412 dr = r;
2413 dg = g;
2414 db = b;
2415 da = a;
2416}

◆ STAGE() [132/188]

SK_OPTS_NS::STAGE ( negate_x  ,
NoCtx   
)

Definition at line 3506 of file SkRasterPipeline_opts.h.

3506{ r = -r; }

◆ STAGE() [133/188]

SK_OPTS_NS::STAGE ( parametric  ,
const skcms_TransferFunction ctx 
)

Definition at line 2730 of file SkRasterPipeline_opts.h.

2730 {
2731 auto fn = [&](F v) {
2732 U32 sign;
2733 v = strip_sign(v, &sign);
2734
2735 F r = if_then_else(v <= ctx->d, mad(ctx->c, v, ctx->f)
2736 , approx_powf(mad(ctx->a, v, ctx->b), ctx->g) + ctx->e);
2737 return apply_sign(r, sign);
2738 };
2739 r = fn(r);
2740 g = fn(g);
2741 b = fn(b);
2742}

◆ STAGE() [134/188]

SK_OPTS_NS::STAGE ( perlin_noise  ,
SkRasterPipeline_PerlinNoiseCtx ctx 
)

Definition at line 3711 of file SkRasterPipeline_opts.h.

3711 {
3712 F noiseVecX = (r + 0.5) * ctx->baseFrequencyX;
3713 F noiseVecY = (g + 0.5) * ctx->baseFrequencyY;
3714 r = g = b = a = F0;
3715 F stitchDataX = F_(ctx->stitchDataInX);
3716 F stitchDataY = F_(ctx->stitchDataInY);
3717 F ratio = F1;
3718
3719 for (int octave = 0; octave < ctx->numOctaves; ++octave) {
3720 // Calculate noise coordinates. (Roughly $noise_helper in Graphite)
3721 F floorValX = floor_(noiseVecX);
3722 F floorValY = floor_(noiseVecY);
3723 F ceilValX = floorValX + 1.0f;
3724 F ceilValY = floorValY + 1.0f;
3725 F fractValX = noiseVecX - floorValX;
3726 F fractValY = noiseVecY - floorValY;
3727
3728 if (ctx->stitching) {
3729 // If we are stitching, wrap the coordinates to the stitch position.
3730 floorValX -= sk_bit_cast<F>(cond_to_mask(floorValX >= stitchDataX) &
3731 sk_bit_cast<I32>(stitchDataX));
3732 floorValY -= sk_bit_cast<F>(cond_to_mask(floorValY >= stitchDataY) &
3733 sk_bit_cast<I32>(stitchDataY));
3734 ceilValX -= sk_bit_cast<F>(cond_to_mask(ceilValX >= stitchDataX) &
3735 sk_bit_cast<I32>(stitchDataX));
3736 ceilValY -= sk_bit_cast<F>(cond_to_mask(ceilValY >= stitchDataY) &
3737 sk_bit_cast<I32>(stitchDataY));
3738 }
3739
3740 U32 latticeLookup = (U32)(iround(floorValX)) & 0xFF;
3741 F latticeIdxX = cast(expand(gather(ctx->latticeSelector, latticeLookup)));
3742 latticeLookup = (U32)(iround(ceilValX)) & 0xFF;
3743 F latticeIdxY = cast(expand(gather(ctx->latticeSelector, latticeLookup)));
3744
3745 U32 b00 = (U32)(iround(latticeIdxX + floorValY)) & 0xFF;
3746 U32 b10 = (U32)(iround(latticeIdxY + floorValY)) & 0xFF;
3747 U32 b01 = (U32)(iround(latticeIdxX + ceilValY)) & 0xFF;
3748 U32 b11 = (U32)(iround(latticeIdxY + ceilValY)) & 0xFF;
3749
3750 // Calculate noise colors. (Roughly $noise_function in Graphite)
3751 // Apply Hermite interpolation to the fractional value.
3752 F smoothX = fractValX * fractValX * (3.0f - 2.0f * fractValX);
3753 F smoothY = fractValY * fractValY * (3.0f - 2.0f * fractValY);
3754
3755 F color[4];
3756 const uint32_t* channelNoiseData = reinterpret_cast<const uint32_t*>(ctx->noiseData);
3757 for (int channel = 0; channel < 4; ++channel) {
3758 U32 sample00 = gather(channelNoiseData, b00);
3759 U32 sample10 = gather(channelNoiseData, b10);
3760 U32 sample01 = gather(channelNoiseData, b01);
3761 U32 sample11 = gather(channelNoiseData, b11);
3762 channelNoiseData += 256;
3763
3764 F u = compute_perlin_vector(sample00, fractValX, fractValY);
3765 F v = compute_perlin_vector(sample10, fractValX - 1.0f, fractValY);
3766 F A = lerp(u, v, smoothX);
3767
3768 u = compute_perlin_vector(sample01, fractValX, fractValY - 1.0f);
3769 v = compute_perlin_vector(sample11, fractValX - 1.0f, fractValY - 1.0f);
3770 F B = lerp(u, v, smoothX);
3771
3772 color[channel] = lerp(A, B, smoothY);
3773 }
3774
3776 // For kTurbulence the result is: abs(noise[-1,1])
3777 color[0] = abs_(color[0]);
3778 color[1] = abs_(color[1]);
3779 color[2] = abs_(color[2]);
3780 color[3] = abs_(color[3]);
3781 }
3782
3783 r = mad(color[0], ratio, r);
3784 g = mad(color[1], ratio, g);
3785 b = mad(color[2], ratio, b);
3786 a = mad(color[3], ratio, a);
3787
3788 // Scale inputs for the next round.
3789 noiseVecX *= 2.0f;
3790 noiseVecY *= 2.0f;
3791 stitchDataX *= 2.0f;
3792 stitchDataY *= 2.0f;
3793 ratio *= 0.5f;
3794 }
3795
3797 // For kFractalNoise the result is: noise[-1,1] * 0.5 + 0.5
3798 r = mad(r, 0.5f, 0.5f);
3799 g = mad(g, 0.5f, 0.5f);
3800 b = mad(b, 0.5f, 0.5f);
3801 a = mad(a, 0.5f, 0.5f);
3802 }
3803
3804 r = clamp_01_(r) * a;
3805 g = clamp_01_(g) * a;
3806 b = clamp_01_(b) * a;
3807 a = clamp_01_(a);
3808}

◆ STAGE() [135/188]

SK_OPTS_NS::STAGE ( PQish  ,
const skcms_TransferFunction ctx 
)

Definition at line 2755 of file SkRasterPipeline_opts.h.

2755 {
2756 auto fn = [&](F v) {
2757 U32 sign;
2758 v = strip_sign(v, &sign);
2759
2760 F r = approx_powf(max(mad(ctx->b, approx_powf(v, ctx->c), ctx->a), 0.0f)
2761 / (mad(ctx->e, approx_powf(v, ctx->c), ctx->d)),
2762 ctx->f);
2763
2764 return apply_sign(r, sign);
2765 };
2766 r = fn(r);
2767 g = fn(g);
2768 b = fn(b);
2769}

◆ STAGE() [136/188]

SK_OPTS_NS::STAGE ( premul  ,
NoCtx   
)

Definition at line 2430 of file SkRasterPipeline_opts.h.

2430 {
2431 r = r * a;
2432 g = g * a;
2433 b = b * a;
2434}

◆ STAGE() [137/188]

SK_OPTS_NS::STAGE ( premul_dst  ,
NoCtx   
)

Definition at line 2435 of file SkRasterPipeline_opts.h.

2435 {
2436 dr = dr * da;
2437 dg = dg * da;
2438 db = db * da;
2439}

◆ STAGE() [138/188]

SK_OPTS_NS::STAGE ( repeat_x  ,
const SkRasterPipeline_TileCtx ctx 
)

Definition at line 3259 of file SkRasterPipeline_opts.h.

3259{ r = exclusive_repeat(r, ctx); }
SI F exclusive_repeat(F v, const SkRasterPipeline_TileCtx *ctx)

◆ STAGE() [139/188]

SK_OPTS_NS::STAGE ( repeat_x_1  ,
NoCtx   
)

Definition at line 3265 of file SkRasterPipeline_opts.h.

3265{ r = clamp_01_(r - floor_(r)); }

◆ STAGE() [140/188]

SK_OPTS_NS::STAGE ( repeat_y  ,
const SkRasterPipeline_TileCtx ctx 
)

Definition at line 3260 of file SkRasterPipeline_opts.h.

3260{ g = exclusive_repeat(g, ctx); }

◆ STAGE() [141/188]

SK_OPTS_NS::STAGE ( rgb_to_hsl  ,
NoCtx   
)

Definition at line 2457 of file SkRasterPipeline_opts.h.

2457 {
2458 F mx = max(r, max(g,b)),
2459 mn = min(r, min(g,b)),
2460 d = mx - mn,
2461 d_rcp = 1.0f / d;
2462
2463 F h = (1/6.0f) *
2464 if_then_else(mx == mn, 0.0f,
2465 if_then_else(mx == r, (g-b)*d_rcp + if_then_else(g < b, 6.0f, 0.0f),
2466 if_then_else(mx == g, (b-r)*d_rcp + 2.0f,
2467 (r-g)*d_rcp + 4.0f)));
2468
2469 F l = (mx + mn) * 0.5f;
2470 F s = if_then_else(mx == mn, 0.0f,
2471 d / if_then_else(l > 0.5f, 2.0f-mx-mn, mx+mn));
2472
2473 r = h;
2474 g = s;
2475 b = l;
2476}

◆ STAGE() [142/188]

SK_OPTS_NS::STAGE ( saturation  ,
NoCtx   
)

Definition at line 2306 of file SkRasterPipeline_opts.h.

2306 {
2307 F R = dr*a,
2308 G = dg*a,
2309 B = db*a;
2310
2311 set_sat(&R, &G, &B, sat( r, g, b)*da);
2312 set_lum(&R, &G, &B, lum(dr,dg,db)* a); // (This is not redundant.)
2313 clip_color(&R,&G,&B, a*da);
2314
2315 r = mad(r, inv(da), mad(dr, inv(a), R));
2316 g = mad(g, inv(da), mad(dg, inv(a), G));
2317 b = mad(b, inv(da), mad(db, inv(a), B));
2318 a = a + nmad(a, da, da);
2319}

◆ STAGE() [143/188]

SK_OPTS_NS::STAGE ( scale_1_float  ,
const float *  c 
)

Definition at line 2621 of file SkRasterPipeline_opts.h.

2621 {
2622 r = r * *c;
2623 g = g * *c;
2624 b = b * *c;
2625 a = a * *c;
2626}

◆ STAGE() [144/188]

SK_OPTS_NS::STAGE ( scale_565  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2638 of file SkRasterPipeline_opts.h.

2638 {
2639 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2640
2641 F cr,cg,cb;
2642 from_565(load<U16>(ptr), &cr, &cg, &cb);
2643
2644 F ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
2645
2646 r = r * cr;
2647 g = g * cg;
2648 b = b * cb;
2649 a = a * ca;
2650}

◆ STAGE() [145/188]

SK_OPTS_NS::STAGE ( scale_native  ,
const float  scales[] 
)

Definition at line 2662 of file SkRasterPipeline_opts.h.

2662 {
2663 auto c = sk_unaligned_load<F>(scales);
2664 r = r * c;
2665 g = g * c;
2666 b = b * c;
2667 a = a * c;
2668}

◆ STAGE() [146/188]

SK_OPTS_NS::STAGE ( scale_u8  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2627 of file SkRasterPipeline_opts.h.

2627 {
2628 auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
2629
2630 auto scales = load<U8>(ptr);
2631 auto c = from_byte(scales);
2632
2633 r = r * c;
2634 g = g * c;
2635 b = b * c;
2636 a = a * c;
2637}

◆ STAGE() [147/188]

SK_OPTS_NS::STAGE ( seed_shader  ,
NoCtx   
)

Definition at line 2033 of file SkRasterPipeline_opts.h.

2033 {
2034 static constexpr float iota[] = {
2035 0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f,
2036 8.5f, 9.5f,10.5f,11.5f,12.5f,13.5f,14.5f,15.5f,
2037 };
2038 static_assert(std::size(iota) >= SkRasterPipeline_kMaxStride_highp);
2039
2040 // It's important for speed to explicitly cast(dx) and cast(dy),
2041 // which has the effect of splatting them to vectors before converting to floats.
2042 // On Intel this breaks a data dependency on previous loop iterations' registers.
2043 r = cast(U32_(dx)) + sk_unaligned_load<F>(iota);
2044 g = cast(U32_(dy)) + 0.5f;
2045 b = F1; // This is w=1 for matrix multiplies by the device coords.
2046 a = F0;
2047}

◆ STAGE() [148/188]

SK_OPTS_NS::STAGE ( set_rgb  ,
const float *  rgb 
)

Definition at line 2388 of file SkRasterPipeline_opts.h.

2388 {
2389 r = F_(rgb[0]);
2390 g = F_(rgb[1]);
2391 b = F_(rgb[2]);
2392}

◆ STAGE() [149/188]

SK_OPTS_NS::STAGE ( srcover_rgba_8888  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2347 of file SkRasterPipeline_opts.h.

2347 {
2348 auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
2349
2350 U32 dst = load<U32>(ptr);
2351 dr = cast((dst ) & 0xff);
2352 dg = cast((dst >> 8) & 0xff);
2353 db = cast((dst >> 16) & 0xff);
2354 da = cast((dst >> 24) );
2355 // {dr,dg,db,da} are in [0,255]
2356 // { r, g, b, a} are in [0, 1] (but may be out of gamut)
2357
2358 r = mad(dr, inv(a), r*255.0f);
2359 g = mad(dg, inv(a), g*255.0f);
2360 b = mad(db, inv(a), b*255.0f);
2361 a = mad(da, inv(a), a*255.0f);
2362 // { r, g, b, a} are now in [0,255] (but may be out of gamut)
2363
2364 // to_unorm() clamps back to gamut. Scaling by 1 since we're already 255-biased.
2365 dst = to_unorm(r, 1, 255)
2366 | to_unorm(g, 1, 255) << 8
2367 | to_unorm(b, 1, 255) << 16
2368 | to_unorm(a, 1, 255) << 24;
2369 store(ptr, dst);
2370}
SI void store(P *ptr, const T &val)

◆ STAGE() [150/188]

SK_OPTS_NS::STAGE ( store_10101010_xr  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3076 of file SkRasterPipeline_opts.h.

3076 {
3077 static constexpr float min = -0.752941f;
3078 static constexpr float max = 1.25098f;
3079 static constexpr float range = max - min;
3080 auto ptr = ptr_at_xy<uint16_t>(ctx, 4*dx,4*dy);
3081
3082 U16 R = pack(to_unorm((r - min) / range, 1023)) << 6,
3083 G = pack(to_unorm((g - min) / range, 1023)) << 6,
3084 B = pack(to_unorm((b - min) / range, 1023)) << 6,
3085 A = pack(to_unorm((a - min) / range, 1023)) << 6;
3086
3087 store4(ptr, R,G,B,A);
3088}
static uint32_t pack(SkFixed f, unsigned max, SkFixed one)

◆ STAGE() [151/188]

SK_OPTS_NS::STAGE ( store_1010102  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3089 of file SkRasterPipeline_opts.h.

3089 {
3090 auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
3091
3092 U32 px = to_unorm(r, 1023)
3093 | to_unorm(g, 1023) << 10
3094 | to_unorm(b, 1023) << 20
3095 | to_unorm(a, 3) << 30;
3096 store(ptr, px);
3097}

◆ STAGE() [152/188]

SK_OPTS_NS::STAGE ( store_1010102_xr  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3098 of file SkRasterPipeline_opts.h.

3098 {
3099 auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
3100 static constexpr float min = -0.752941f;
3101 static constexpr float max = 1.25098f;
3102 static constexpr float range = max - min;
3103 U32 px = to_unorm((r - min) / range, 1023)
3104 | to_unorm((g - min) / range, 1023) << 10
3105 | to_unorm((b - min) / range, 1023) << 20
3106 | to_unorm(a, 3) << 30;
3107 store(ptr, px);
3108}

◆ STAGE() [153/188]

SK_OPTS_NS::STAGE ( store_10x6  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3025 of file SkRasterPipeline_opts.h.

3025 {
3026 auto ptr = ptr_at_xy<uint16_t>(ctx, 4*dx,4*dy);
3027
3028 U16 R = pack(to_unorm(r, 1023)) << 6,
3029 G = pack(to_unorm(g, 1023)) << 6,
3030 B = pack(to_unorm(b, 1023)) << 6,
3031 A = pack(to_unorm(a, 1023)) << 6;
3032
3033 store4(ptr, R,G,B,A);
3034}

◆ STAGE() [154/188]

SK_OPTS_NS::STAGE ( store_16161616  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3001 of file SkRasterPipeline_opts.h.

3001 {
3002 auto ptr = ptr_at_xy<uint16_t>(ctx, 4*dx,4*dy);
3003
3004 U16 R = pack(to_unorm(r, 65535)),
3005 G = pack(to_unorm(g, 65535)),
3006 B = pack(to_unorm(b, 65535)),
3007 A = pack(to_unorm(a, 65535));
3008
3009 store4(ptr, R,G,B,A);
3010}

◆ STAGE() [155/188]

SK_OPTS_NS::STAGE ( store_4444  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2881 of file SkRasterPipeline_opts.h.

2881 {
2882 auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
2883 U16 px = pack( to_unorm(r, 15) << 12
2884 | to_unorm(g, 15) << 8
2885 | to_unorm(b, 15) << 4
2886 | to_unorm(a, 15) );
2887 store(ptr, px);
2888}

◆ STAGE() [156/188]

SK_OPTS_NS::STAGE ( store_565  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2859 of file SkRasterPipeline_opts.h.

2859 {
2860 auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
2861
2862 U16 px = pack( to_unorm(r, 31) << 11
2863 | to_unorm(g, 63) << 5
2864 | to_unorm(b, 31) );
2865 store(ptr, px);
2866}

◆ STAGE() [157/188]

SK_OPTS_NS::STAGE ( store_8888  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2903 of file SkRasterPipeline_opts.h.

2903 {
2904 auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
2905
2906 U32 px = to_unorm(r, 255)
2907 | to_unorm(g, 255) << 8
2908 | to_unorm(b, 255) << 16
2909 | to_unorm(a, 255) << 24;
2910 store(ptr, px);
2911}

◆ STAGE() [158/188]

SK_OPTS_NS::STAGE ( store_a16  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2954 of file SkRasterPipeline_opts.h.

2954 {
2955 auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
2956
2957 U16 px = pack(to_unorm(a, 65535));
2958 store(ptr, px);
2959}

◆ STAGE() [159/188]

SK_OPTS_NS::STAGE ( store_a8  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2828 of file SkRasterPipeline_opts.h.

2828 {
2829 auto ptr = ptr_at_xy<uint8_t>(ctx, dx,dy);
2830
2831 U8 packed = pack(pack(to_unorm(a, 255)));
2832 store(ptr, packed);
2833}

◆ STAGE() [160/188]

SK_OPTS_NS::STAGE ( store_af16  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3172 of file SkRasterPipeline_opts.h.

3172 {
3173 auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
3174 store(ptr, to_half(a));
3175}

◆ STAGE() [161/188]

SK_OPTS_NS::STAGE ( store_dst  ,
float *  ptr 
)

Definition at line 2153 of file SkRasterPipeline_opts.h.

2153 {
2154 sk_unaligned_store(ptr + 0*N, dr);
2155 sk_unaligned_store(ptr + 1*N, dg);
2156 sk_unaligned_store(ptr + 2*N, db);
2157 sk_unaligned_store(ptr + 3*N, da);
2158}

◆ STAGE() [162/188]

SK_OPTS_NS::STAGE ( store_f16  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3142 of file SkRasterPipeline_opts.h.

3142 {
3143 auto ptr = ptr_at_xy<uint64_t>(ctx, dx,dy);
3144 store4((uint16_t*)ptr, to_half(r)
3145 , to_half(g)
3146 , to_half(b)
3147 , to_half(a));
3148}

◆ STAGE() [163/188]

SK_OPTS_NS::STAGE ( store_f32  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3231 of file SkRasterPipeline_opts.h.

3231 {
3232 auto ptr = ptr_at_xy<float>(ctx, 4*dx,4*dy);
3233 store4(ptr, r,g,b,a);
3234}

◆ STAGE() [164/188]

SK_OPTS_NS::STAGE ( store_r8  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2834 of file SkRasterPipeline_opts.h.

2834 {
2835 auto ptr = ptr_at_xy<uint8_t>(ctx, dx,dy);
2836
2837 U8 packed = pack(pack(to_unorm(r, 255)));
2838 store(ptr, packed);
2839}

◆ STAGE() [165/188]

SK_OPTS_NS::STAGE ( store_rg1616  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2980 of file SkRasterPipeline_opts.h.

2980 {
2981 auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
2982
2983 U32 px = to_unorm(r, 65535)
2984 | to_unorm(g, 65535) << 16;
2985 store(ptr, px);
2986}

◆ STAGE() [166/188]

SK_OPTS_NS::STAGE ( store_rg88  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2932 of file SkRasterPipeline_opts.h.

2932 {
2933 auto ptr = ptr_at_xy<uint16_t>(ctx, dx, dy);
2934 U16 px = pack( to_unorm(r, 255) | to_unorm(g, 255) << 8 );
2935 store(ptr, px);
2936}

◆ STAGE() [167/188]

SK_OPTS_NS::STAGE ( store_rgf16  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3209 of file SkRasterPipeline_opts.h.

3209 {
3210 auto ptr = ptr_at_xy<uint32_t>(ctx, dx, dy);
3211 store2((uint16_t*)ptr, to_half(r)
3212 , to_half(g));
3213}
SI void store2(uint16_t *ptr, U16 r, U16 g)

◆ STAGE() [168/188]

SK_OPTS_NS::STAGE ( store_src  ,
float *  ptr 
)

Definition at line 2123 of file SkRasterPipeline_opts.h.

2123 {
2124 sk_unaligned_store(ptr + 0*N, r);
2125 sk_unaligned_store(ptr + 1*N, g);
2126 sk_unaligned_store(ptr + 2*N, b);
2127 sk_unaligned_store(ptr + 3*N, a);
2128}

◆ STAGE() [169/188]

SK_OPTS_NS::STAGE ( store_src_a  ,
float *  ptr 
)

Definition at line 2140 of file SkRasterPipeline_opts.h.

2140 {
2141 sk_unaligned_store(ptr, a);
2142}

◆ STAGE() [170/188]

SK_OPTS_NS::STAGE ( store_src_rg  ,
float *  ptr 
)

Definition at line 2130 of file SkRasterPipeline_opts.h.

2130 {
2131 sk_unaligned_store(ptr + 0*N, r);
2132 sk_unaligned_store(ptr + 1*N, g);
2133}

◆ STAGE() [171/188]

SK_OPTS_NS::STAGE ( swap_rb  ,
NoCtx   
)

Definition at line 2400 of file SkRasterPipeline_opts.h.

2400 {
2401 auto tmp = r;
2402 r = b;
2403 b = tmp;
2404}

◆ STAGE() [172/188]

SK_OPTS_NS::STAGE ( swap_rb_dst  ,
NoCtx   
)

Definition at line 2405 of file SkRasterPipeline_opts.h.

2405 {
2406 auto tmp = dr;
2407 dr = db;
2408 db = tmp;
2409}

◆ STAGE() [173/188]

SK_OPTS_NS::STAGE ( swap_src_dst  ,
NoCtx   
)

Definition at line 2423 of file SkRasterPipeline_opts.h.

2423 {
2424 std::swap(r, dr);
2425 std::swap(g, dg);
2426 std::swap(b, db);
2427 std::swap(a, da);
2428}

◆ STAGE() [174/188]

SK_OPTS_NS::STAGE ( swizzle  ,
void *  ctx 
)

Definition at line 5044 of file SkRasterPipeline_opts.h.

5044 {
5045 auto ir = r, ig = g, ib = b, ia = a;
5046 F* o[] = {&r, &g, &b, &a};
5047 char swiz[4];
5048 memcpy(swiz, &ctx, sizeof(swiz));
5049
5050 for (int i = 0; i < 4; ++i) {
5051 switch (swiz[i]) {
5052 case 'r': *o[i] = ir; break;
5053 case 'g': *o[i] = ig; break;
5054 case 'b': *o[i] = ib; break;
5055 case 'a': *o[i] = ia; break;
5056 case '0': *o[i] = F0; break;
5057 case '1': *o[i] = F1; break;
5058 default: break;
5059 }
5060 }
5061}

◆ STAGE() [175/188]

SK_OPTS_NS::STAGE ( unbounded_set_rgb  ,
const float *  rgb 
)

Definition at line 2394 of file SkRasterPipeline_opts.h.

2394 {
2395 r = F_(rgb[0]);
2396 g = F_(rgb[1]);
2397 b = F_(rgb[2]);
2398}

◆ STAGE() [176/188]

SK_OPTS_NS::STAGE ( unbounded_uniform_color  ,
const SkRasterPipeline_UniformColorCtx c 
)

◆ STAGE() [177/188]

SK_OPTS_NS::STAGE ( uniform_color  ,
const SkRasterPipeline_UniformColorCtx c 
)

Definition at line 2084 of file SkRasterPipeline_opts.h.

2084 {
2085 r = F_(c->r);
2086 g = F_(c->g);
2087 b = F_(c->b);
2088 a = F_(c->a);
2089}

◆ STAGE() [178/188]

SK_OPTS_NS::STAGE ( uniform_color_dst  ,
const SkRasterPipeline_UniformColorCtx c 
)

Definition at line 2097 of file SkRasterPipeline_opts.h.

2097 {
2098 dr = F_(c->r);
2099 dg = F_(c->g);
2100 db = F_(c->b);
2101 da = F_(c->a);
2102}

◆ STAGE() [179/188]

SK_OPTS_NS::STAGE ( unpremul  ,
NoCtx   
)

Definition at line 2440 of file SkRasterPipeline_opts.h.

2440 {
2441 float inf = sk_bit_cast<float>(0x7f800000);
2442 auto scale = if_then_else(1.0f/a < inf, 1.0f/a, 0.0f);
2443 r *= scale;
2444 g *= scale;
2445 b *= scale;
2446}

◆ STAGE() [180/188]

SK_OPTS_NS::STAGE ( unpremul_polar  ,
NoCtx   
)

Definition at line 2447 of file SkRasterPipeline_opts.h.

2447 {
2448 float inf = sk_bit_cast<float>(0x7f800000);
2449 auto scale = if_then_else(1.0f/a < inf, 1.0f/a, 0.0f);
2450 g *= scale;
2451 b *= scale;
2452}

◆ STAGE() [181/188]

SK_OPTS_NS::STAGE ( white_color  ,
NoCtx   
)

Definition at line 2110 of file SkRasterPipeline_opts.h.

2110 {
2111 r = g = b = a = F1;
2112}

◆ STAGE() [182/188]

SK_OPTS_NS::STAGE ( xy_to_2pt_conical_focal_on_circle  ,
NoCtx   
)

Definition at line 3513 of file SkRasterPipeline_opts.h.

3513 {
3514 F x = r, y = g, &t = r;
3515 t = x + y*y / x; // (x^2 + y^2) / x
3516}

◆ STAGE() [183/188]

SK_OPTS_NS::STAGE ( xy_to_2pt_conical_greater  ,
const SkRasterPipeline_2PtConicalCtx ctx 
)

Definition at line 3523 of file SkRasterPipeline_opts.h.

3523 {
3524 F x = r, y = g, &t = r;
3525 t = sqrt_(x*x - y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
3526}

◆ STAGE() [184/188]

SK_OPTS_NS::STAGE ( xy_to_2pt_conical_smaller  ,
const SkRasterPipeline_2PtConicalCtx ctx 
)

Definition at line 3528 of file SkRasterPipeline_opts.h.

3528 {
3529 F x = r, y = g, &t = r;
3530 t = -sqrt_(x*x - y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
3531}

◆ STAGE() [185/188]

SK_OPTS_NS::STAGE ( xy_to_2pt_conical_strip  ,
const SkRasterPipeline_2PtConicalCtx ctx 
)

Definition at line 3508 of file SkRasterPipeline_opts.h.

3508 {
3509 F x = r, y = g, &t = r;
3510 t = x + sqrt_(ctx->fP0 - y*y); // ctx->fP0 = r0 * r0
3511}

◆ STAGE() [186/188]

SK_OPTS_NS::STAGE ( xy_to_2pt_conical_well_behaved  ,
const SkRasterPipeline_2PtConicalCtx ctx 
)

Definition at line 3518 of file SkRasterPipeline_opts.h.

3518 {
3519 F x = r, y = g, &t = r;
3520 t = sqrt_(x*x + y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
3521}

◆ STAGE() [187/188]

SK_OPTS_NS::STAGE ( xy_to_radius  ,
NoCtx   
)

Definition at line 3498 of file SkRasterPipeline_opts.h.

3498 {
3499 F X2 = r * r,
3500 Y2 = g * g;
3501 r = sqrt_(X2 + Y2);
3502}

◆ STAGE() [188/188]

SK_OPTS_NS::STAGE ( xy_to_unit_angle  ,
NoCtx   
)

Definition at line 3472 of file SkRasterPipeline_opts.h.

3472 {
3473 F X = r,
3474 Y = g;
3475 F xabs = abs_(X),
3476 yabs = abs_(Y);
3477
3478 F slope = min(xabs, yabs)/max(xabs, yabs);
3479 F s = slope * slope;
3480
3481 // Use a 7th degree polynomial to approximate atan.
3482 // This was generated using sollya.gforge.inria.fr.
3483 // A float optimized polynomial was generated using the following command.
3484 // P1 = fpminimax((1/(2*Pi))*atan(x),[|1,3,5,7|],[|24...|],[2^(-40),1],relative);
3485 F phi = slope
3486 * (0.15912117063999176025390625f + s
3487 * (-5.185396969318389892578125e-2f + s
3488 * (2.476101927459239959716796875e-2f + s
3489 * (-7.0547382347285747528076171875e-3f))));
3490
3491 phi = if_then_else(xabs < yabs, 1.0f/4.0f - phi, phi);
3492 phi = if_then_else(X < 0.0f , 1.0f/2.0f - phi, phi);
3493 phi = if_then_else(Y < 0.0f , 1.0f - phi , phi);
3494 phi = if_then_else(phi != phi , 0.0f , phi); // Check for NaN.
3495 r = phi;
3496}

◆ STAGE_BRANCH() [1/5]

SK_OPTS_NS::STAGE_BRANCH ( branch_if_all_lanes_active  ,
SkRasterPipeline_BranchIfAllLanesActiveCtx ctx 
)

Definition at line 3980 of file SkRasterPipeline_opts.h.

3980 {
3981 uint32_t iota[] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
3982 static_assert(std::size(iota) >= SkRasterPipeline_kMaxStride_highp);
3983
3984 I32 tailLanes = cond_to_mask(*ctx->tail <= sk_unaligned_load<U32>(iota));
3985 return all(execution_mask() | tailLanes) ? ctx->offset : 1;
3986}
#define execution_mask()

◆ STAGE_BRANCH() [2/5]

SK_OPTS_NS::STAGE_BRANCH ( branch_if_any_lanes_active  ,
SkRasterPipeline_BranchCtx ctx 
)

Definition at line 3988 of file SkRasterPipeline_opts.h.

3988 {
3989 return any(execution_mask()) ? ctx->offset : 1;
3990}

◆ STAGE_BRANCH() [3/5]

SK_OPTS_NS::STAGE_BRANCH ( branch_if_no_active_lanes_eq  ,
SkRasterPipeline_BranchIfEqualCtx ctx 
)

Definition at line 4000 of file SkRasterPipeline_opts.h.

4000 {
4001 // Compare each lane against the expected value...
4002 I32 match = cond_to_mask(*(const I32*)ctx->ptr == ctx->value);
4003 // ... but mask off lanes that aren't executing.
4004 match &= execution_mask();
4005 // If any lanes matched, don't take the branch.
4006 return any(match) ? 1 : ctx->offset;
4007}
static bool match(const char *needle, const char *haystack)
Definition DM.cpp:1132

◆ STAGE_BRANCH() [4/5]

SK_OPTS_NS::STAGE_BRANCH ( branch_if_no_lanes_active  ,
SkRasterPipeline_BranchCtx ctx 
)

Definition at line 3992 of file SkRasterPipeline_opts.h.

3992 {
3993 return any(execution_mask()) ? 1 : ctx->offset;
3994}

◆ STAGE_BRANCH() [5/5]

SK_OPTS_NS::STAGE_BRANCH ( jump  ,
SkRasterPipeline_BranchCtx ctx 
)

Definition at line 3996 of file SkRasterPipeline_opts.h.

3996 {
3997 return ctx->offset;
3998}

◆ STAGE_TAIL() [1/75]

SK_OPTS_NS::STAGE_TAIL ( acos_float  ,
F dst 
)

Definition at line 4450 of file SkRasterPipeline_opts.h.

4450{ *dst = acos_(*dst); }

◆ STAGE_TAIL() [2/75]

SK_OPTS_NS::STAGE_TAIL ( asin_float  ,
F dst 
)

Definition at line 4449 of file SkRasterPipeline_opts.h.

4449{ *dst = asin_(*dst); }

◆ STAGE_TAIL() [3/75]

SK_OPTS_NS::STAGE_TAIL ( atan_float  ,
F dst 
)

Definition at line 4451 of file SkRasterPipeline_opts.h.

4451{ *dst = atan_(*dst); }

◆ STAGE_TAIL() [4/75]

SK_OPTS_NS::STAGE_TAIL ( case_op  ,
SkRasterPipeline_CaseOpCtx packed 
)

Definition at line 3948 of file SkRasterPipeline_opts.h.

3948 {
3949 auto ctx = SkRPCtxUtils::Unpack(packed);
3950
3951 // Check each lane to see if the case value matches the expectation.
3952 I32* actualValue = (I32*)(base + ctx.offset);
3953 I32 caseMatches = cond_to_mask(*actualValue == ctx.expectedValue);
3954
3955 // In lanes where we found a match, enable the loop mask...
3956 g = sk_bit_cast<F>(sk_bit_cast<I32>(g) | caseMatches);
3957 update_execution_mask();
3958
3959 // ... and clear the default-case mask.
3960 I32* defaultMask = actualValue + 1;
3961 *defaultMask &= ~caseMatches;
3962}
#define update_execution_mask()

◆ STAGE_TAIL() [5/75]

SK_OPTS_NS::STAGE_TAIL ( continue_op  ,
I32 continueMask 
)

Definition at line 3939 of file SkRasterPipeline_opts.h.

3939 {
3940 // Set any currently-executing lanes in the continue-mask to true.
3941 *continueMask |= execution_mask();
3942
3943 // Disable any currently-executing lanes from the loop mask. (Just like `mask_off_loop_mask`.)
3944 g = sk_bit_cast<F>(sk_bit_cast<I32>(g) & ~execution_mask());
3945 update_execution_mask();
3946}

◆ STAGE_TAIL() [6/75]

SK_OPTS_NS::STAGE_TAIL ( copy_2_immutables_unmasked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4159 of file SkRasterPipeline_opts.h.

4159 {
4160 copy_n_immutable_unmasked_fn<2>(packed, base);
4161}

◆ STAGE_TAIL() [7/75]

SK_OPTS_NS::STAGE_TAIL ( copy_2_slots_masked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4184 of file SkRasterPipeline_opts.h.

4184 {
4185 copy_n_slots_masked_fn<2>(packed, base, execution_mask());
4186}

◆ STAGE_TAIL() [8/75]

SK_OPTS_NS::STAGE_TAIL ( copy_2_slots_unmasked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4129 of file SkRasterPipeline_opts.h.

4129 {
4130 copy_n_slots_unmasked_fn<2>(packed, base);
4131}

◆ STAGE_TAIL() [9/75]

SK_OPTS_NS::STAGE_TAIL ( copy_2_uniforms  ,
SkRasterPipeline_UniformCtx ctx 
)

Definition at line 4071 of file SkRasterPipeline_opts.h.

4071 {
4072 const int* src = ctx->src;
4073 I32* dst = (I32*)ctx->dst;
4074 dst[0] = I32_(src[0]);
4075 dst[1] = I32_(src[1]);
4076}

◆ STAGE_TAIL() [10/75]

SK_OPTS_NS::STAGE_TAIL ( copy_3_immutables_unmasked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4162 of file SkRasterPipeline_opts.h.

4162 {
4163 copy_n_immutable_unmasked_fn<3>(packed, base);
4164}

◆ STAGE_TAIL() [11/75]

SK_OPTS_NS::STAGE_TAIL ( copy_3_slots_masked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4187 of file SkRasterPipeline_opts.h.

4187 {
4188 copy_n_slots_masked_fn<3>(packed, base, execution_mask());
4189}

◆ STAGE_TAIL() [12/75]

SK_OPTS_NS::STAGE_TAIL ( copy_3_slots_unmasked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4132 of file SkRasterPipeline_opts.h.

4132 {
4133 copy_n_slots_unmasked_fn<3>(packed, base);
4134}

◆ STAGE_TAIL() [13/75]

SK_OPTS_NS::STAGE_TAIL ( copy_3_uniforms  ,
SkRasterPipeline_UniformCtx ctx 
)

Definition at line 4077 of file SkRasterPipeline_opts.h.

4077 {
4078 const int* src = ctx->src;
4079 I32* dst = (I32*)ctx->dst;
4080 dst[0] = I32_(src[0]);
4081 dst[1] = I32_(src[1]);
4082 dst[2] = I32_(src[2]);
4083}

◆ STAGE_TAIL() [14/75]

SK_OPTS_NS::STAGE_TAIL ( copy_4_immutables_unmasked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4165 of file SkRasterPipeline_opts.h.

4165 {
4166 copy_n_immutable_unmasked_fn<4>(packed, base);
4167}

◆ STAGE_TAIL() [15/75]

SK_OPTS_NS::STAGE_TAIL ( copy_4_slots_masked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4190 of file SkRasterPipeline_opts.h.

4190 {
4191 copy_n_slots_masked_fn<4>(packed, base, execution_mask());
4192}

◆ STAGE_TAIL() [16/75]

SK_OPTS_NS::STAGE_TAIL ( copy_4_slots_unmasked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4135 of file SkRasterPipeline_opts.h.

4135 {
4136 copy_n_slots_unmasked_fn<4>(packed, base);
4137}

◆ STAGE_TAIL() [17/75]

SK_OPTS_NS::STAGE_TAIL ( copy_4_uniforms  ,
SkRasterPipeline_UniformCtx ctx 
)

Definition at line 4084 of file SkRasterPipeline_opts.h.

4084 {
4085 const int* src = ctx->src;
4086 I32* dst = (I32*)ctx->dst;
4087 dst[0] = I32_(src[0]);
4088 dst[1] = I32_(src[1]);
4089 dst[2] = I32_(src[2]);
4090 dst[3] = I32_(src[3]);
4091}

◆ STAGE_TAIL() [18/75]

SK_OPTS_NS::STAGE_TAIL ( copy_constant  ,
SkRasterPipeline_ConstantCtx packed 
)

Definition at line 4093 of file SkRasterPipeline_opts.h.

4093 {
4094 auto ctx = SkRPCtxUtils::Unpack(packed);
4095 I32* dst = (I32*)(base + ctx.dst);
4096 I32 value = I32_(ctx.value);
4097 dst[0] = value;
4098}

◆ STAGE_TAIL() [19/75]

SK_OPTS_NS::STAGE_TAIL ( copy_from_indirect_uniform_unmasked  ,
SkRasterPipeline_CopyIndirectCtx ctx 
)

Definition at line 4296 of file SkRasterPipeline_opts.h.

4296 {
4297 // Clamp the indirect offsets to stay within the limit.
4298 U32 offsets = *(const U32*)ctx->indirectOffset;
4299 offsets = min(offsets, U32_(ctx->indirectLimit));
4300
4301 // Use gather to perform indirect lookups; write the results into `dst`.
4302 const int* src = ctx->src;
4303 I32* dst = (I32*)ctx->dst;
4304 I32* end = dst + ctx->slots;
4305 do {
4306 *dst = gather(src, offsets);
4307 dst += 1;
4308 src += 1;
4309 } while (dst != end);
4310}

◆ STAGE_TAIL() [20/75]

SK_OPTS_NS::STAGE_TAIL ( copy_from_indirect_unmasked  ,
SkRasterPipeline_CopyIndirectCtx ctx 
)

Definition at line 4272 of file SkRasterPipeline_opts.h.

4272 {
4273 // Clamp the indirect offsets to stay within the limit.
4274 U32 offsets = *(const U32*)ctx->indirectOffset;
4275 offsets = min(offsets, U32_(ctx->indirectLimit));
4276
4277 // Scale up the offsets to account for the N lanes per value.
4278 offsets *= N;
4279
4280 // Adjust the offsets forward so that they fetch from the correct lane.
4281 static constexpr uint32_t iota[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
4282 static_assert(std::size(iota) >= SkRasterPipeline_kMaxStride_highp);
4283 offsets += sk_unaligned_load<U32>(iota);
4284
4285 // Use gather to perform indirect lookups; write the results into `dst`.
4286 const int* src = ctx->src;
4287 I32* dst = (I32*)ctx->dst;
4288 I32* end = dst + ctx->slots;
4289 do {
4290 *dst = gather(src, offsets);
4291 dst += 1;
4292 src += N;
4293 } while (dst != end);
4294}

◆ STAGE_TAIL() [21/75]

SK_OPTS_NS::STAGE_TAIL ( copy_immutable_unmasked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4156 of file SkRasterPipeline_opts.h.

4156 {
4157 copy_n_immutable_unmasked_fn<1>(packed, base);
4158}

◆ STAGE_TAIL() [22/75]

SK_OPTS_NS::STAGE_TAIL ( copy_slot_masked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4181 of file SkRasterPipeline_opts.h.

4181 {
4182 copy_n_slots_masked_fn<1>(packed, base, execution_mask());
4183}

◆ STAGE_TAIL() [23/75]

SK_OPTS_NS::STAGE_TAIL ( copy_slot_unmasked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4126 of file SkRasterPipeline_opts.h.

4126 {
4127 copy_n_slots_unmasked_fn<1>(packed, base);
4128}

◆ STAGE_TAIL() [24/75]

SK_OPTS_NS::STAGE_TAIL ( copy_to_indirect_masked  ,
SkRasterPipeline_CopyIndirectCtx ctx 
)

Definition at line 4312 of file SkRasterPipeline_opts.h.

4312 {
4313 // Clamp the indirect offsets to stay within the limit.
4314 U32 offsets = *(const U32*)ctx->indirectOffset;
4315 offsets = min(offsets, U32_(ctx->indirectLimit));
4316
4317 // Scale up the offsets to account for the N lanes per value.
4318 offsets *= N;
4319
4320 // Adjust the offsets forward so that they store into the correct lane.
4321 static constexpr uint32_t iota[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
4322 static_assert(std::size(iota) >= SkRasterPipeline_kMaxStride_highp);
4323 offsets += sk_unaligned_load<U32>(iota);
4324
4325 // Perform indirect, masked writes into `dst`.
4326 const I32* src = (const I32*)ctx->src;
4327 const I32* end = src + ctx->slots;
4328 int* dst = ctx->dst;
4329 I32 mask = execution_mask();
4330 do {
4331 scatter_masked(*src, dst, offsets, mask);
4332 dst += N;
4333 src += 1;
4334 } while (src != end);
4335}

◆ STAGE_TAIL() [25/75]

SK_OPTS_NS::STAGE_TAIL ( copy_uniform  ,
SkRasterPipeline_UniformCtx ctx 
)

Definition at line 4066 of file SkRasterPipeline_opts.h.

4066 {
4067 const int* src = ctx->src;
4068 I32* dst = (I32*)ctx->dst;
4069 dst[0] = I32_(src[0]);
4070}

◆ STAGE_TAIL() [26/75]

SK_OPTS_NS::STAGE_TAIL ( cos_float  ,
F dst 
)

Definition at line 4447 of file SkRasterPipeline_opts.h.

4447{ *dst = cos_(*dst); }

◆ STAGE_TAIL() [27/75]

SK_OPTS_NS::STAGE_TAIL ( dot_3_floats  ,
F dst 
)

Definition at line 4770 of file SkRasterPipeline_opts.h.

4770 {
4771 dst[0] = mad(dst[0], dst[3],
4772 mad(dst[1], dst[4],
4773 dst[2] * dst[5]));
4774}

◆ STAGE_TAIL() [28/75]

SK_OPTS_NS::STAGE_TAIL ( dot_4_floats  ,
F dst 
)

Definition at line 4776 of file SkRasterPipeline_opts.h.

4776 {
4777 dst[0] = mad(dst[0], dst[4],
4778 mad(dst[1], dst[5],
4779 mad(dst[2], dst[6],
4780 dst[3] * dst[7])));
4781}

◆ STAGE_TAIL() [29/75]

SK_OPTS_NS::STAGE_TAIL ( exchange_src  ,
F rgba 
)

Definition at line 3876 of file SkRasterPipeline_opts.h.

3876 {
3877 // Swaps r,g,b,a registers with the values at `rgba`.
3878 F temp[4] = {r, g, b, a};
3879 r = rgba[0];
3880 rgba[0] = temp[0];
3881 g = rgba[1];
3882 rgba[1] = temp[1];
3883 b = rgba[2];
3884 rgba[2] = temp[2];
3885 a = rgba[3];
3886 rgba[3] = temp[3];
3887}

◆ STAGE_TAIL() [30/75]

SK_OPTS_NS::STAGE_TAIL ( exp2_float  ,
F dst 
)

Definition at line 4454 of file SkRasterPipeline_opts.h.

4454{ *dst = approx_pow2(*dst); }

◆ STAGE_TAIL() [31/75]

SK_OPTS_NS::STAGE_TAIL ( exp_float  ,
F dst 
)

Definition at line 4453 of file SkRasterPipeline_opts.h.

4453{ *dst = approx_exp(*dst); }

◆ STAGE_TAIL() [32/75]

SK_OPTS_NS::STAGE_TAIL ( init_lane_masks  ,
SkRasterPipeline_InitLaneMasksCtx ctx 
)

Definition at line 3853 of file SkRasterPipeline_opts.h.

3853 {
3854 uint32_t iota[] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
3855 static_assert(std::size(iota) >= SkRasterPipeline_kMaxStride_highp);
3856
3857 I32 mask = cond_to_mask(sk_unaligned_load<U32>(iota) < *ctx->tail);
3858 r = g = b = a = sk_bit_cast<F>(mask);
3859}

◆ STAGE_TAIL() [33/75]

SK_OPTS_NS::STAGE_TAIL ( inverse_mat2  ,
F dst 
)

Definition at line 4458 of file SkRasterPipeline_opts.h.

4458 {
4459 F a00 = dst[0], a01 = dst[1],
4460 a10 = dst[2], a11 = dst[3];
4461 F det = nmad(a01, a10, a00 * a11),
4462 invdet = rcp_precise(det);
4463 dst[0] = invdet * a11;
4464 dst[1] = -invdet * a01;
4465 dst[2] = -invdet * a10;
4466 dst[3] = invdet * a00;
4467}

◆ STAGE_TAIL() [34/75]

SK_OPTS_NS::STAGE_TAIL ( inverse_mat3  ,
F dst 
)

Definition at line 4469 of file SkRasterPipeline_opts.h.

4469 {
4470 F a00 = dst[0], a01 = dst[1], a02 = dst[2],
4471 a10 = dst[3], a11 = dst[4], a12 = dst[5],
4472 a20 = dst[6], a21 = dst[7], a22 = dst[8];
4473 F b01 = nmad(a12, a21, a22 * a11),
4474 b11 = nmad(a22, a10, a12 * a20),
4475 b21 = nmad(a11, a20, a21 * a10);
4476 F det = mad(a00, b01, mad(a01, b11, a02 * b21)),
4477 invdet = rcp_precise(det);
4478 dst[0] = invdet * b01;
4479 dst[1] = invdet * nmad(a22, a01, a02 * a21);
4480 dst[2] = invdet * nmad(a02, a11, a12 * a01);
4481 dst[3] = invdet * b11;
4482 dst[4] = invdet * nmad(a02, a20, a22 * a00);
4483 dst[5] = invdet * nmad(a12, a00, a02 * a10);
4484 dst[6] = invdet * b21;
4485 dst[7] = invdet * nmad(a21, a00, a01 * a20);
4486 dst[8] = invdet * nmad(a01, a10, a11 * a00);
4487}

◆ STAGE_TAIL() [35/75]

SK_OPTS_NS::STAGE_TAIL ( inverse_mat4  ,
F dst 
)

Definition at line 4489 of file SkRasterPipeline_opts.h.

4489 {
4490 F a00 = dst[0], a01 = dst[1], a02 = dst[2], a03 = dst[3],
4491 a10 = dst[4], a11 = dst[5], a12 = dst[6], a13 = dst[7],
4492 a20 = dst[8], a21 = dst[9], a22 = dst[10], a23 = dst[11],
4493 a30 = dst[12], a31 = dst[13], a32 = dst[14], a33 = dst[15];
4494 F b00 = nmad(a01, a10, a00 * a11),
4495 b01 = nmad(a02, a10, a00 * a12),
4496 b02 = nmad(a03, a10, a00 * a13),
4497 b03 = nmad(a02, a11, a01 * a12),
4498 b04 = nmad(a03, a11, a01 * a13),
4499 b05 = nmad(a03, a12, a02 * a13),
4500 b06 = nmad(a21, a30, a20 * a31),
4501 b07 = nmad(a22, a30, a20 * a32),
4502 b08 = nmad(a23, a30, a20 * a33),
4503 b09 = nmad(a22, a31, a21 * a32),
4504 b10 = nmad(a23, a31, a21 * a33),
4505 b11 = nmad(a23, a32, a22 * a33),
4506 det = mad(b00, b11, b05 * b06) + mad(b02, b09, b03 * b08) - mad(b01, b10, b04 * b07),
4507 invdet = rcp_precise(det);
4508 b00 *= invdet;
4509 b01 *= invdet;
4510 b02 *= invdet;
4511 b03 *= invdet;
4512 b04 *= invdet;
4513 b05 *= invdet;
4514 b06 *= invdet;
4515 b07 *= invdet;
4516 b08 *= invdet;
4517 b09 *= invdet;
4518 b10 *= invdet;
4519 b11 *= invdet;
4520 dst[0] = mad(a13, b09, nmad(a12, b10, a11*b11));
4521 dst[1] = nmad(a03, b09, nmad(a01, b11, a02*b10));
4522 dst[2] = mad(a33, b03, nmad(a32, b04, a31*b05));
4523 dst[3] = nmad(a23, b03, nmad(a21, b05, a22*b04));
4524 dst[4] = nmad(a13, b07, nmad(a10, b11, a12*b08));
4525 dst[5] = mad(a03, b07, nmad(a02, b08, a00*b11));
4526 dst[6] = nmad(a33, b01, nmad(a30, b05, a32*b02));
4527 dst[7] = mad(a23, b01, nmad(a22, b02, a20*b05));
4528 dst[8] = mad(a13, b06, nmad(a11, b08, a10*b10));
4529 dst[9] = nmad(a03, b06, nmad(a00, b10, a01*b08));
4530 dst[10] = mad(a33, b00, nmad(a31, b02, a30*b04));
4531 dst[11] = nmad(a23, b00, nmad(a20, b04, a21*b02));
4532 dst[12] = nmad(a12, b06, nmad(a10, b09, a11*b07));
4533 dst[13] = mad(a02, b06, nmad(a01, b07, a00*b09));
4534 dst[14] = nmad(a32, b00, nmad(a30, b03, a31*b01));
4535 dst[15] = mad(a22, b00, nmad(a21, b01, a20*b03));
4536}

◆ STAGE_TAIL() [36/75]

SK_OPTS_NS::STAGE_TAIL ( load_condition_mask  ,
F ctx 
)

Definition at line 3889 of file SkRasterPipeline_opts.h.

3889 {
3890 r = sk_unaligned_load<F>(ctx);
3891 update_execution_mask();
3892}

◆ STAGE_TAIL() [37/75]

SK_OPTS_NS::STAGE_TAIL ( load_loop_mask  ,
F ctx 
)

Definition at line 3910 of file SkRasterPipeline_opts.h.

3910 {
3911 g = sk_unaligned_load<F>(ctx);
3912 update_execution_mask();
3913}

◆ STAGE_TAIL() [38/75]

SK_OPTS_NS::STAGE_TAIL ( load_return_mask  ,
F ctx 
)

Definition at line 3964 of file SkRasterPipeline_opts.h.

3964 {
3965 b = sk_unaligned_load<F>(ctx);
3966 update_execution_mask();
3967}

◆ STAGE_TAIL() [39/75]

SK_OPTS_NS::STAGE_TAIL ( log2_float  ,
F dst 
)

Definition at line 4456 of file SkRasterPipeline_opts.h.

4456{ *dst = approx_log2(*dst); }

◆ STAGE_TAIL() [40/75]

SK_OPTS_NS::STAGE_TAIL ( log_float  ,
F dst 
)

Definition at line 4455 of file SkRasterPipeline_opts.h.

4455{ *dst = approx_log(*dst); }

◆ STAGE_TAIL() [41/75]

SK_OPTS_NS::STAGE_TAIL ( mask_off_loop_mask  ,
NoCtx   
)

Definition at line 3919 of file SkRasterPipeline_opts.h.

3919 {
3920 // We encountered a break statement. If a lane was active, it should be masked off now, and stay
3921 // masked-off until the termination of the loop.
3922 g = sk_bit_cast<F>(sk_bit_cast<I32>(g) & ~execution_mask());
3923 update_execution_mask();
3924}

◆ STAGE_TAIL() [42/75]

SK_OPTS_NS::STAGE_TAIL ( mask_off_return_mask  ,
NoCtx   
)

Definition at line 3973 of file SkRasterPipeline_opts.h.

3973 {
3974 // We encountered a return statement. If a lane was active, it should be masked off now, and
3975 // stay masked-off until the end of the function.
3976 b = sk_bit_cast<F>(sk_bit_cast<I32>(b) & ~execution_mask());
3977 update_execution_mask();
3978}

◆ STAGE_TAIL() [43/75]

SK_OPTS_NS::STAGE_TAIL ( matrix_multiply_2  ,
SkRasterPipeline_MatrixMultiplyCtx packed 
)

Definition at line 4832 of file SkRasterPipeline_opts.h.

4832 {
4833 matrix_multiply<2>(packed, base);
4834}

◆ STAGE_TAIL() [44/75]

SK_OPTS_NS::STAGE_TAIL ( matrix_multiply_3  ,
SkRasterPipeline_MatrixMultiplyCtx packed 
)

Definition at line 4836 of file SkRasterPipeline_opts.h.

4836 {
4837 matrix_multiply<3>(packed, base);
4838}

◆ STAGE_TAIL() [45/75]

SK_OPTS_NS::STAGE_TAIL ( matrix_multiply_4  ,
SkRasterPipeline_MatrixMultiplyCtx packed 
)

Definition at line 4840 of file SkRasterPipeline_opts.h.

4840 {
4841 matrix_multiply<4>(packed, base);
4842}

◆ STAGE_TAIL() [46/75]

SK_OPTS_NS::STAGE_TAIL ( merge_condition_mask  ,
I32 ptr 
)

Definition at line 3898 of file SkRasterPipeline_opts.h.

3898 {
3899 // Set the condition-mask to the intersection of two adjacent masks at the pointer.
3900 r = sk_bit_cast<F>(ptr[0] & ptr[1]);
3901 update_execution_mask();
3902}

◆ STAGE_TAIL() [47/75]

SK_OPTS_NS::STAGE_TAIL ( merge_inv_condition_mask  ,
I32 ptr 
)

Definition at line 3904 of file SkRasterPipeline_opts.h.

3904 {
3905 // Set the condition-mask to the intersection of the first mask and the inverse of the second.
3906 r = sk_bit_cast<F>(ptr[0] & ~ptr[1]);
3907 update_execution_mask();
3908}

◆ STAGE_TAIL() [48/75]

SK_OPTS_NS::STAGE_TAIL ( merge_loop_mask  ,
I32 ptr 
)

Definition at line 3932 of file SkRasterPipeline_opts.h.

3932 {
3933 // Set the loop-mask to the intersection of the current loop-mask with the mask at the pointer.
3934 // (Note: this behavior subtly differs from merge_condition_mask!)
3935 g = sk_bit_cast<F>(sk_bit_cast<I32>(g) & ptr[0]);
3936 update_execution_mask();
3937}

◆ STAGE_TAIL() [49/75]

SK_OPTS_NS::STAGE_TAIL ( reenable_loop_mask  ,
I32 ptr 
)

Definition at line 3926 of file SkRasterPipeline_opts.h.

3926 {
3927 // Set the loop-mask to the union of the current loop-mask with the mask at the pointer.
3928 g = sk_bit_cast<F>(sk_bit_cast<I32>(g) | ptr[0]);
3929 update_execution_mask();
3930}

◆ STAGE_TAIL() [50/75]

SK_OPTS_NS::STAGE_TAIL ( refract_4_floats  ,
F dst 
)

Definition at line 4846 of file SkRasterPipeline_opts.h.

4846 {
4847 // Algorithm adapted from https://registry.khronos.org/OpenGL-Refpages/gl4/html/refract.xhtml
4848 F *incident = dst + 0;
4849 F *normal = dst + 4;
4850 F eta = dst[8];
4851
4852 F dotNI = mad(normal[0], incident[0],
4853 mad(normal[1], incident[1],
4854 mad(normal[2], incident[2],
4855 normal[3] * incident[3])));
4856
4857 F k = 1.0 - eta * eta * (1.0 - dotNI * dotNI);
4858 F sqrt_k = sqrt_(k);
4859
4860 for (int idx = 0; idx < 4; ++idx) {
4861 dst[idx] = if_then_else(k >= 0,
4862 eta * incident[idx] - (eta * dotNI + sqrt_k) * normal[idx],
4863 0.0);
4864 }
4865}

◆ STAGE_TAIL() [51/75]

SK_OPTS_NS::STAGE_TAIL ( set_base_pointer  ,
std::byte *  p 
)

Definition at line 3838 of file SkRasterPipeline_opts.h.

3838 {
3839 base = p;
3840}

◆ STAGE_TAIL() [52/75]

SK_OPTS_NS::STAGE_TAIL ( shuffle  ,
SkRasterPipeline_ShuffleCtx ctx 
)

Definition at line 4244 of file SkRasterPipeline_opts.h.

4244 {
4245 shuffle_fn<16>((std::byte*)ctx->ptr, ctx->offsets, ctx->count);
4246}

◆ STAGE_TAIL() [53/75]

SK_OPTS_NS::STAGE_TAIL ( splat_2_constants  ,
SkRasterPipeline_ConstantCtx packed 
)

Definition at line 4099 of file SkRasterPipeline_opts.h.

4099 {
4100 auto ctx = SkRPCtxUtils::Unpack(packed);
4101 I32* dst = (I32*)(base + ctx.dst);
4102 I32 value = I32_(ctx.value);
4103 dst[0] = dst[1] = value;
4104}

◆ STAGE_TAIL() [54/75]

SK_OPTS_NS::STAGE_TAIL ( splat_3_constants  ,
SkRasterPipeline_ConstantCtx packed 
)

Definition at line 4105 of file SkRasterPipeline_opts.h.

4105 {
4106 auto ctx = SkRPCtxUtils::Unpack(packed);
4107 I32* dst = (I32*)(base + ctx.dst);
4108 I32 value = I32_(ctx.value);
4109 dst[0] = dst[1] = dst[2] = value;
4110}

◆ STAGE_TAIL() [55/75]

SK_OPTS_NS::STAGE_TAIL ( splat_4_constants  ,
SkRasterPipeline_ConstantCtx packed 
)

Definition at line 4111 of file SkRasterPipeline_opts.h.

4111 {
4112 auto ctx = SkRPCtxUtils::Unpack(packed);
4113 I32* dst = (I32*)(base + ctx.dst);
4114 I32 value = I32_(ctx.value);
4115 dst[0] = dst[1] = dst[2] = dst[3] = value;
4116}

◆ STAGE_TAIL() [56/75]

SK_OPTS_NS::STAGE_TAIL ( sqrt_float  ,
F dst 
)

Definition at line 4452 of file SkRasterPipeline_opts.h.

4452{ *dst = sqrt_(*dst); }

◆ STAGE_TAIL() [57/75]

SK_OPTS_NS::STAGE_TAIL ( store_condition_mask  ,
F ctx 
)

Definition at line 3894 of file SkRasterPipeline_opts.h.

3894 {
3895 sk_unaligned_store(ctx, r);
3896}

◆ STAGE_TAIL() [58/75]

SK_OPTS_NS::STAGE_TAIL ( store_device_xy01  ,
F dst 
)

Definition at line 3861 of file SkRasterPipeline_opts.h.

3861 {
3862 // This is very similar to `seed_shader + store_src`, but b/a are backwards.
3863 // (sk_FragCoord actually puts w=1 in the w slot.)
3864 static constexpr float iota[] = {
3865 0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f,
3866 8.5f, 9.5f,10.5f,11.5f,12.5f,13.5f,14.5f,15.5f,
3867 };
3868 static_assert(std::size(iota) >= SkRasterPipeline_kMaxStride_highp);
3869
3870 dst[0] = cast(U32_(dx)) + sk_unaligned_load<F>(iota);
3871 dst[1] = cast(U32_(dy)) + 0.5f;
3872 dst[2] = F0;
3873 dst[3] = F1;
3874}

◆ STAGE_TAIL() [59/75]

SK_OPTS_NS::STAGE_TAIL ( store_loop_mask  ,
F ctx 
)

Definition at line 3915 of file SkRasterPipeline_opts.h.

3915 {
3916 sk_unaligned_store(ctx, g);
3917}

◆ STAGE_TAIL() [60/75]

SK_OPTS_NS::STAGE_TAIL ( store_return_mask  ,
F ctx 
)

Definition at line 3969 of file SkRasterPipeline_opts.h.

3969 {
3970 sk_unaligned_store(ctx, b);
3971}

◆ STAGE_TAIL() [61/75]

SK_OPTS_NS::STAGE_TAIL ( swizzle_1  ,
SkRasterPipeline_SwizzleCtx packed 
)

Definition at line 4232 of file SkRasterPipeline_opts.h.

4232 {
4233 small_swizzle_fn<1>(packed, base);
4234}

◆ STAGE_TAIL() [62/75]

SK_OPTS_NS::STAGE_TAIL ( swizzle_2  ,
SkRasterPipeline_SwizzleCtx packed 
)

Definition at line 4235 of file SkRasterPipeline_opts.h.

4235 {
4236 small_swizzle_fn<2>(packed, base);
4237}

◆ STAGE_TAIL() [63/75]

SK_OPTS_NS::STAGE_TAIL ( swizzle_3  ,
SkRasterPipeline_SwizzleCtx packed 
)

Definition at line 4238 of file SkRasterPipeline_opts.h.

4238 {
4239 small_swizzle_fn<3>(packed, base);
4240}

◆ STAGE_TAIL() [64/75]

SK_OPTS_NS::STAGE_TAIL ( swizzle_4  ,
SkRasterPipeline_SwizzleCtx packed 
)

Definition at line 4241 of file SkRasterPipeline_opts.h.

4241 {
4242 small_swizzle_fn<4>(packed, base);
4243}

◆ STAGE_TAIL() [65/75]

SK_OPTS_NS::STAGE_TAIL ( swizzle_copy_2_slots_masked  ,
SkRasterPipeline_SwizzleCopyCtx ctx 
)

Definition at line 4262 of file SkRasterPipeline_opts.h.

4262 {
4263 swizzle_copy_masked_fn<2>((I32*)ctx->dst, (const I32*)ctx->src, ctx->offsets, execution_mask());
4264}

◆ STAGE_TAIL() [66/75]

SK_OPTS_NS::STAGE_TAIL ( swizzle_copy_3_slots_masked  ,
SkRasterPipeline_SwizzleCopyCtx ctx 
)

Definition at line 4265 of file SkRasterPipeline_opts.h.

4265 {
4266 swizzle_copy_masked_fn<3>((I32*)ctx->dst, (const I32*)ctx->src, ctx->offsets, execution_mask());
4267}

◆ STAGE_TAIL() [67/75]

SK_OPTS_NS::STAGE_TAIL ( swizzle_copy_4_slots_masked  ,
SkRasterPipeline_SwizzleCopyCtx ctx 
)

Definition at line 4268 of file SkRasterPipeline_opts.h.

4268 {
4269 swizzle_copy_masked_fn<4>((I32*)ctx->dst, (const I32*)ctx->src, ctx->offsets, execution_mask());
4270}

◆ STAGE_TAIL() [68/75]

SK_OPTS_NS::STAGE_TAIL ( swizzle_copy_slot_masked  ,
SkRasterPipeline_SwizzleCopyCtx ctx 
)

Definition at line 4259 of file SkRasterPipeline_opts.h.

4259 {
4260 swizzle_copy_masked_fn<1>((I32*)ctx->dst, (const I32*)ctx->src, ctx->offsets, execution_mask());
4261}

◆ STAGE_TAIL() [69/75]

SK_OPTS_NS::STAGE_TAIL ( swizzle_copy_to_indirect_masked  ,
SkRasterPipeline_SwizzleCopyIndirectCtx ctx 
)

Definition at line 4337 of file SkRasterPipeline_opts.h.

4337 {
4338 // Clamp the indirect offsets to stay within the limit.
4339 U32 offsets = *(const U32*)ctx->indirectOffset;
4340 offsets = min(offsets, U32_(ctx->indirectLimit));
4341
4342 // Scale up the offsets to account for the N lanes per value.
4343 offsets *= N;
4344
4345 // Adjust the offsets forward so that they store into the correct lane.
4346 static constexpr uint32_t iota[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
4347 static_assert(std::size(iota) >= SkRasterPipeline_kMaxStride_highp);
4348 offsets += sk_unaligned_load<U32>(iota);
4349
4350 // Perform indirect, masked, swizzled writes into `dst`.
4351 const I32* src = (const I32*)ctx->src;
4352 const I32* end = src + ctx->slots;
4353 std::byte* dstB = (std::byte*)ctx->dst;
4354 const uint16_t* swizzle = ctx->offsets;
4355 I32 mask = execution_mask();
4356 do {
4357 int* dst = (int*)(dstB + *swizzle);
4358 scatter_masked(*src, dst, offsets, mask);
4359 swizzle += 1;
4360 src += 1;
4361 } while (src != end);
4362}

◆ STAGE_TAIL() [70/75]

SK_OPTS_NS::STAGE_TAIL ( tan_float  ,
F dst 
)

Definition at line 4448 of file SkRasterPipeline_opts.h.

4448{ *dst = tan_(*dst); }

◆ STAGE_TAIL() [71/75]

SK_OPTS_NS::STAGE_TAIL ( trace_enter  ,
SkRasterPipeline_TraceFuncCtx ctx 
)

Definition at line 4016 of file SkRasterPipeline_opts.h.

4016 {
4017 const I32* traceMask = (const I32*)ctx->traceMask;
4018 if (any(execution_mask() & *traceMask)) {
4019 ctx->traceHook->enter(ctx->funcIdx);
4020 }
4021}
virtual void enter(int fnIdx)=0

◆ STAGE_TAIL() [72/75]

SK_OPTS_NS::STAGE_TAIL ( trace_exit  ,
SkRasterPipeline_TraceFuncCtx ctx 
)

Definition at line 4023 of file SkRasterPipeline_opts.h.

4023 {
4024 const I32* traceMask = (const I32*)ctx->traceMask;
4025 if (any(execution_mask() & *traceMask)) {
4026 ctx->traceHook->exit(ctx->funcIdx);
4027 }
4028}
virtual void exit(int fnIdx)=0

◆ STAGE_TAIL() [73/75]

SK_OPTS_NS::STAGE_TAIL ( trace_line  ,
SkRasterPipeline_TraceLineCtx ctx 
)

Definition at line 4009 of file SkRasterPipeline_opts.h.

4009 {
4010 const I32* traceMask = (const I32*)ctx->traceMask;
4011 if (any(execution_mask() & *traceMask)) {
4012 ctx->traceHook->line(ctx->lineNumber);
4013 }
4014}
virtual void line(int lineNum)=0

◆ STAGE_TAIL() [74/75]

SK_OPTS_NS::STAGE_TAIL ( trace_scope  ,
SkRasterPipeline_TraceScopeCtx ctx 
)

Definition at line 4030 of file SkRasterPipeline_opts.h.

4030 {
4031 // Note that trace_scope intentionally does not incorporate the execution mask. Otherwise, the
4032 // scopes would become unbalanced if the execution mask changed in the middle of a block. The
4033 // caller is responsible for providing a combined trace- and execution-mask.
4034 const I32* traceMask = (const I32*)ctx->traceMask;
4035 if (any(*traceMask)) {
4036 ctx->traceHook->scope(ctx->delta);
4037 }
4038}
virtual void scope(int delta)=0

◆ STAGE_TAIL() [75/75]

SK_OPTS_NS::STAGE_TAIL ( trace_var  ,
SkRasterPipeline_TraceVarCtx ctx 
)

Definition at line 4040 of file SkRasterPipeline_opts.h.

4040 {
4041 const I32* traceMask = (const I32*)ctx->traceMask;
4042 I32 mask = execution_mask() & *traceMask;
4043 if (any(mask)) {
4044 for (size_t lane = 0; lane < N; ++lane) {
4045 if (select_lane(mask, lane)) {
4046 const I32* data = (const I32*)ctx->data;
4047 int slotIdx = ctx->slotIdx, numSlots = ctx->numSlots;
4048 if (ctx->indirectOffset) {
4049 // If this was an indirect store, apply the indirect-offset to the data pointer.
4050 uint32_t indirectOffset = select_lane(*(const U32*)ctx->indirectOffset, lane);
4051 indirectOffset = std::min<uint32_t>(indirectOffset, ctx->indirectLimit);
4052 data += indirectOffset;
4053 slotIdx += indirectOffset;
4054 }
4055 while (numSlots--) {
4056 ctx->traceHook->var(slotIdx, select_lane(*data, lane));
4057 ++slotIdx;
4058 ++data;
4059 }
4060 break;
4061 }
4062 }
4063 }
4064}
virtual void var(int slot, int32_t val)=0
SI uint32_t select_lane(uint32_t data, int)

◆ start_pipeline()

static void SK_OPTS_NS::start_pipeline ( size_t  dx,
size_t  dy,
size_t  xlimit,
size_t  ylimit,
SkRasterPipelineStage program,
SkSpan< SkRasterPipeline_MemoryCtxPatch memoryCtxPatches,
uint8_t *  tailPointer 
)
static

Definition at line 1530 of file SkRasterPipeline_opts.h.

1534 {
1535 uint8_t unreferencedTail;
1536 if (!tailPointer) {
1537 tailPointer = &unreferencedTail;
1538 }
1539 auto start = (Stage)program->fn;
1540 const size_t x0 = dx;
1541 std::byte* const base = nullptr;
1542 for (; dy < ylimit; dy++) {
1543 #if JUMPER_NARROW_STAGES
1544 Params params = { x0,dy,base, F0,F0,F0,F0 };
1545 while (params.dx + N <= xlimit) {
1546 start(&params,program, F0,F0,F0,F0);
1547 params.dx += N;
1548 }
1549 if (size_t tail = xlimit - params.dx) {
1550 *tailPointer = tail;
1551 patch_memory_contexts(memoryCtxPatches, params.dx, dy, tail);
1552 start(&params,program, F0,F0,F0,F0);
1553 restore_memory_contexts(memoryCtxPatches, params.dx, dy, tail);
1554 *tailPointer = 0xFF;
1555 }
1556 #else
1557 dx = x0;
1558 while (dx + N <= xlimit) {
1559 start(program,dx,dy,base, F0,F0,F0,F0, F0,F0,F0,F0);
1560 dx += N;
1561 }
1562 if (size_t tail = xlimit - dx) {
1563 *tailPointer = tail;
1564 patch_memory_contexts(memoryCtxPatches, dx, dy, tail);
1565 start(program,dx,dy,base, F0,F0,F0,F0, F0,F0,F0,F0);
1566 restore_memory_contexts(memoryCtxPatches, dx, dy, tail);
1567 *tailPointer = 0xFF;
1568 }
1569 #endif
1570 }
1571}
static void patch_memory_contexts(SkSpan< SkRasterPipeline_MemoryCtxPatch > memoryCtxPatches, size_t dx, size_t dy, size_t tail)
static void restore_memory_contexts(SkSpan< SkRasterPipeline_MemoryCtxPatch > memoryCtxPatches, size_t dx, size_t dy, size_t tail)
skia_private::AutoTArray< sk_sp< SkImageFilter > > filters; TypedMatrix matrix; SkScalar dx
Definition SkRecords.h:208 (Doxygen tooltip artifact: unrelated declarations fused onto one line.)

◆ store()

template<typename V , typename T >
SI void SK_OPTS_NS::store ( T dst,
V  v 
)

Definition at line 1746 of file SkRasterPipeline_opts.h.

1746 {
1747 sk_unaligned_store(dst, v);
1748}

◆ store2()

SI void SK_OPTS_NS::store2 ( uint16_t *  ptr,
U16  r,
U16  g 
)

Definition at line 182 of file SkRasterPipeline_opts.h.

182 {
183 ptr[0] = r;
184 ptr[1] = g;
185 }

◆ store4() [1/2]

SI void SK_OPTS_NS::store4 ( float *  ptr,
F  r,
F  g,
F  b,
F  a 
)

Definition at line 205 of file SkRasterPipeline_opts.h.

205 {
206 ptr[0] = r;
207 ptr[1] = g;
208 ptr[2] = b;
209 ptr[3] = a;
210 }

◆ store4() [2/2]

SI void SK_OPTS_NS::store4 ( uint16_t *  ptr,
U16  r,
U16  g,
U16  b,
U16  a 
)

Definition at line 192 of file SkRasterPipeline_opts.h.

192 {
193 ptr[0] = r;
194 ptr[1] = g;
195 ptr[2] = b;
196 ptr[3] = a;
197 }

◆ strip_sign()

SI F SK_OPTS_NS::strip_sign ( F  x,
U32 sign 
)

Definition at line 2720 of file SkRasterPipeline_opts.h.

2720 {
2721 U32 bits = sk_bit_cast<U32>(x);
2722 *sign = bits & 0x80000000;
2723 return sk_bit_cast<F>(bits ^ *sign);
2724}

◆ sub_fn()

template<typename T >
SI void SK_OPTS_NS::sub_fn ( T dst,
T src 
)

Definition at line 4575 of file SkRasterPipeline_opts.h.

4575 {
4576 *dst -= *src;
4577}

◆ swizzle_copy_masked_fn()

template<int NumSlots>
SI void SK_OPTS_NS::swizzle_copy_masked_fn ( I32 dst,
const I32 src,
uint16_t *  offsets,
I32  mask 
)

Definition at line 4249 of file SkRasterPipeline_opts.h.

4249 {
4250 std::byte* dstB = (std::byte*)dst;
4251 SK_UNROLL for (int count = 0; count < NumSlots; ++count) {
4252 I32* dstS = (I32*)(dstB + *offsets);
4253 *dstS = if_then_else(mask, *src, *dstS);
4254 offsets += 1;
4255 src += 1;
4256 }
4257}

◆ tan_()

SI F SK_OPTS_NS::tan_ ( F  x)

Definition at line 1885 of file SkRasterPipeline_opts.h.

1885 {
1886 constexpr float Pi = SK_FloatPI;
1887 // periodic between -pi/2 ... pi/2
1888 // shift to 0...Pi, scale 1/Pi to get into 0...1, then fract, scale-up, shift-back
1889 x = mad(fract(mad(x, 1/Pi, 0.5f)), Pi, -Pi/2);
1890
1891 I32 neg = (x < 0.0f);
1892 x = if_then_else(neg, -x, x);
1893
1894 // minimize total error by shifting if x > pi/8
1895 I32 use_quotient = (x > (Pi/8));
1896 x = if_then_else(use_quotient, x - (Pi/4), x);
1897
1898 // 9th order poly = 4th order(x^2) * x
1899 const float c4 = 62 / 2835.0f;
1900 const float c3 = 17 / 315.0f;
1901 const float c2 = 2 / 15.0f;
1902 const float c1 = 1 / 3.0f;
1903 const float c0 = 1.0f;
1904 F x2 = x * x;
1905 x *= mad(x2, mad(x2, mad(x2, mad(x2, c4, c3), c2), c1), c0);
1906 x = if_then_else(use_quotient, (1+x)/(1-x), x);
1907 x = if_then_else(neg, -x, x);
1908 return x;
1909}

◆ to_half()

SI U16 SK_OPTS_NS::to_half ( F  f)

Definition at line 1422 of file SkRasterPipeline_opts.h.

1422 {
1423#if defined(JUMPER_IS_NEON) && defined(SK_CPU_ARM64)
1424 return (U16)vcvt_f16_f32(f);
1425
1426#elif defined(JUMPER_IS_SKX)
1427 return (U16)_mm512_cvtps_ph(f, _MM_FROUND_CUR_DIRECTION);
1428
1429#elif defined(JUMPER_IS_HSW)
1430 return (U16)_mm256_cvtps_ph(f, _MM_FROUND_CUR_DIRECTION);
1431
1432#else
1433 // Remember, a float is 1-8-23 (sign-exponent-mantissa) with 127 exponent bias.
1434 U32 sem = sk_bit_cast<U32>(f),
1435 s = sem & 0x80000000,
1436 em = sem ^ s;
1437
1438 // Convert to 1-5-10 half with 15 bias, flushing denorm halfs (including zero) to zero.
1439 auto denorm = (I32)em < 0x38800000; // I32 comparison is often quicker, and always safe here.
1440 return pack((U32)if_then_else(denorm, I32_(0)
1441 , (I32)((s>>16) + (em>>13) - ((127-15)<<10))));
1442#endif
1443}

◆ to_unorm()

SI U32 SK_OPTS_NS::to_unorm ( F  v,
float  scale,
float  bias = 1.0f 
)

Definition at line 2005 of file SkRasterPipeline_opts.h.

2005 {
2006 // Any time we use round() we probably want to use to_unorm().
2007 return round(min(max(0.0f, v), bias), F_(scale));
2008}

◆ trunc_()

SI U32 SK_OPTS_NS::trunc_ ( F  v)

Definition at line 1339 of file SkRasterPipeline_opts.h.

1339{ return (U32)v; }

◆ two()

SI F SK_OPTS_NS::two ( F  x)

Definition at line 2172 of file SkRasterPipeline_opts.h.

2172{ return x + x; }

◆ U32_()

SI constexpr U32 SK_OPTS_NS::U32_ ( uint32_t  x)
constexpr

Definition at line 1301 of file SkRasterPipeline_opts.h.

1301{ return x; }

Variable Documentation

◆ dst

F * SK_OPTS_NS::dst { *dst = sin_(*dst); } (Doxygen artifact: this is the one-line body of the sin_float STAGE_TAIL, mis-indexed here as a variable named `dst`.)

Definition at line 4446 of file SkRasterPipeline_opts.h.

◆ F0

constexpr F SK_OPTS_NS::F0 = F_(0.0f)
static constexpr

Definition at line 1309 of file SkRasterPipeline_opts.h.

◆ F1

constexpr F SK_OPTS_NS::F1 = F_(1.0f)
static constexpr

Definition at line 1310 of file SkRasterPipeline_opts.h.

◆ N

constexpr size_t SK_OPTS_NS::N = sizeof(F) / sizeof(float)
static constexpr

Definition at line 1491 of file SkRasterPipeline_opts.h.

◆ S32_alpha_D32_filter_DXDY

constexpr void (* SK_OPTS_NS::S32_alpha_D32_filter_DXDY)( const SkBitmapProcState & ,
const uint32_t *  ,
int  ,
SkPMColor *  
) = nullptr
static constexpr

Definition at line 574 of file SkBitmapProcState_opts.h.