Flutter Engine
The Flutter Engine
Namespaces | Classes | Typedefs | Functions | Variables
SK_OPTS_NS Namespace Reference

Namespaces

namespace  lowp
 

Classes

struct  Params
 
struct  RGB
 

Typedefs

using F = float
 
using I32 = int32_t
 
using U64 = uint64_t
 
using U32 = uint32_t
 
using U16 = uint16_t
 
using U8 = uint8_t
 
using Stage = void(ABI *)(Params *, SkRasterPipelineStage *program, F r, F g, F b, F a)
 

Functions

template<typename U32 , typename Out >
static void decode_packed_coordinates_and_weight (U32 packed, Out *v0, Out *v1, Out *w)
 
void S32_alpha_D32_filter_DX (const SkBitmapProcState &s, const uint32_t *xy, int count, uint32_t *colors)
 
static __m128i SkAlphaMul_lsx (__m128i x, __m128i y)
 
template<bool isColor>
static void D32_A8_Opaque_Color_lsx (void *SK_RESTRICT dst, size_t dstRB, const void *SK_RESTRICT maskPtr, size_t maskRB, SkColor color, int width, int height)
 
static void blit_mask_d32_a8_general (SkPMColor *dst, size_t dstRB, const SkAlpha *mask, size_t maskRB, SkColor color, int w, int h)
 
static void blit_mask_d32_a8_opaque (SkPMColor *dst, size_t dstRB, const SkAlpha *mask, size_t maskRB, SkColor color, int w, int h)
 
static void blit_mask_d32_a8_black (SkPMColor *dst, size_t dstRB, const SkAlpha *maskPtr, size_t maskRB, int width, int height)
 
void blit_mask_d32_a8 (SkPMColor *dst, size_t dstRB, const SkAlpha *mask, size_t maskRB, SkColor color, int w, int h)
 
void blit_row_s32a_opaque (SkPMColor *dst, const SkPMColor *src, int len, U8CPU alpha)
 
void blit_row_color32 (SkPMColor *dst, int count, SkPMColor color)
 
template<typename T >
static void memsetT (T buffer[], T value, int count)
 
void memset16 (uint16_t buffer[], uint16_t value, int count)
 
void memset32 (uint32_t buffer[], uint32_t value, int count)
 
void memset64 (uint64_t buffer[], uint64_t value, int count)
 
template<typename T >
static void rect_memsetT (T buffer[], T value, int count, size_t rowBytes, int height)
 
void rect_memset16 (uint16_t buffer[], uint16_t value, int count, size_t rowBytes, int height)
 
void rect_memset32 (uint32_t buffer[], uint32_t value, int count, size_t rowBytes, int height)
 
void rect_memset64 (uint64_t buffer[], uint64_t value, int count, size_t rowBytes, int height)
 
SI F min (F a, F b)
 
SI I32 min (I32 a, I32 b)
 
SI U32 min (U32 a, U32 b)
 
SI F max (F a, F b)
 
SI I32 max (I32 a, I32 b)
 
SI U32 max (U32 a, U32 b)
 
SI F mad (F f, F m, F a)
 
SI F nmad (F f, F m, F a)
 
SI F abs_ (F v)
 
SI I32 abs_ (I32 v)
 
SI F floor_ (F v)
 
SI F ceil_ (F v)
 
SI F rcp_approx (F v)
 
SI F rsqrt_approx (F v)
 
SI F sqrt_ (F v)
 
SI F rcp_precise (F v)
 
SI I32 iround (F v)
 
SI U32 round (F v)
 
SI U32 round (F v, F scale)
 
SI U16 pack (U32 v)
 
SI U8 pack (U16 v)
 
SI F if_then_else (I32 c, F t, F e)
 
SI I32 if_then_else (I32 c, I32 t, I32 e)
 
SI bool any (I32 c)
 
SI bool all (I32 c)
 
template<typename T >
SI T gather (const T *p, U32 ix)
 
SI void scatter_masked (I32 src, int *dst, U32 ix, I32 mask)
 
SI void load2 (const uint16_t *ptr, U16 *r, U16 *g)
 
SI void store2 (uint16_t *ptr, U16 r, U16 g)
 
SI void load4 (const uint16_t *ptr, U16 *r, U16 *g, U16 *b, U16 *a)
 
SI void store4 (uint16_t *ptr, U16 r, U16 g, U16 b, U16 a)
 
SI void load4 (const float *ptr, F *r, F *g, F *b, F *a)
 
SI void store4 (float *ptr, F r, F g, F b, F a)
 
SI constexpr F F_ (float x)
 
SI constexpr I32 I32_ (int32_t x)
 
SI constexpr U32 U32_ (uint32_t x)
 
SI F cast (U32 v)
 
SI F cast64 (U64 v)
 
SI U32 trunc_ (F v)
 
SI U32 expand (U16 v)
 
SI U32 expand (U8 v)
 
SI F fract (F v)
 
SI F approx_log2 (F x)
 
SI F approx_log (F x)
 
SI F approx_pow2 (F x)
 
SI F approx_exp (F x)
 
SI F approx_powf (F x, F y)
 
SI F from_half (U16 h)
 
SI U16 to_half (F f)
 
static void patch_memory_contexts (SkSpan< SkRasterPipeline_MemoryCtxPatch > memoryCtxPatches, size_t dx, size_t dy, size_t tail)
 
static void restore_memory_contexts (SkSpan< SkRasterPipeline_MemoryCtxPatch > memoryCtxPatches, size_t dx, size_t dy, size_t tail)
 
SI F rcp_fast (F v)
 
SI F rsqrt (F v)
 
static void start_pipeline (size_t dx, size_t dy, size_t xlimit, size_t ylimit, SkRasterPipelineStage *program, SkSpan< SkRasterPipeline_MemoryCtxPatch > memoryCtxPatches, uint8_t *tailPointer)
 
static void ABI just_return (Params *, SkRasterPipelineStage *, F, F, F, F)
 
static void ABI stack_checkpoint (Params *params, SkRasterPipelineStage *program, F r, F g, F b, F a)
 
static void ABI stack_rewind (Params *params, SkRasterPipelineStage *program, F r, F g, F b, F a)
 
template<typename V , typename T >
SI V load (const T *src)
 
template<typename V , typename T >
SI void store (T *dst, V v)
 
SI F from_byte (U8 b)
 
SI F from_short (U16 s)
 
SI void from_565 (U16 _565, F *r, F *g, F *b)
 
SI void from_4444 (U16 _4444, F *r, F *g, F *b, F *a)
 
SI void from_8888 (U32 _8888, F *r, F *g, F *b, F *a)
 
SI void from_88 (U16 _88, F *r, F *g)
 
SI void from_1010102 (U32 rgba, F *r, F *g, F *b, F *a)
 
SI void from_1010102_xr (U32 rgba, F *r, F *g, F *b, F *a)
 
SI void from_10101010_xr (U64 _10x6, F *r, F *g, F *b, F *a)
 
SI void from_10x6 (U64 _10x6, F *r, F *g, F *b, F *a)
 
SI void from_1616 (U32 _1616, F *r, F *g)
 
SI void from_16161616 (U64 _16161616, F *r, F *g, F *b, F *a)
 
template<typename T >
SI T * ptr_at_xy (const SkRasterPipeline_MemoryCtx *ctx, size_t dx, size_t dy)
 
SI F clamp (F v, F limit)
 
SI F clamp_ex (F v, float limit)
 
SI F sin5q_ (F x)
 
SI F sin_ (F x)
 
SI F cos_ (F x)
 
SI F tan_ (F x)
 
SI F approx_atan_unit (F x)
 
SI F atan_ (F x)
 
SI F asin_ (F x)
 
SI F acos_ (F x)
 
SI F atan2_ (F y0, F x0)
 
template<typename T >
SI U32 ix_and_ptr (T **ptr, const SkRasterPipeline_GatherCtx *ctx, F x, F y)
 
SI U32 to_unorm (F v, float scale, float bias=1.0f)
 
SI I32 cond_to_mask (I32 cond)
 
SI uint32_t select_lane (uint32_t data, int)
 
SI int32_t select_lane (int32_t data, int)
 
 STAGE (seed_shader, NoCtx)
 
 STAGE (dither, const float *rate)
 
 STAGE (uniform_color, const SkRasterPipeline_UniformColorCtx *c)
 
 STAGE (unbounded_uniform_color, const SkRasterPipeline_UniformColorCtx *c)
 
 STAGE (uniform_color_dst, const SkRasterPipeline_UniformColorCtx *c)
 
 STAGE (black_color, NoCtx)
 
 STAGE (white_color, NoCtx)
 
 STAGE (load_src, const float *ptr)
 
 STAGE (store_src, float *ptr)
 
 STAGE (store_src_rg, float *ptr)
 
 STAGE (load_src_rg, float *ptr)
 
 STAGE (store_src_a, float *ptr)
 
 STAGE (load_dst, const float *ptr)
 
 STAGE (store_dst, float *ptr)
 
SI F inv (F x)
 
SI F two (F x)
 
 BLEND_MODE (clear)
 
 BLEND_MODE (srcatop)
 
 BLEND_MODE (dstatop)
 
 BLEND_MODE (srcin)
 
 BLEND_MODE (dstin)
 
 BLEND_MODE (srcout)
 
 BLEND_MODE (dstout)
 
 BLEND_MODE (srcover)
 
 BLEND_MODE (dstover)
 
 BLEND_MODE (modulate)
 
 BLEND_MODE (multiply)
 
 BLEND_MODE (plus_)
 
 BLEND_MODE (screen)
 
 BLEND_MODE (xor_)
 
 BLEND_MODE (darken)
 
 BLEND_MODE (lighten)
 
 BLEND_MODE (difference)
 
 BLEND_MODE (exclusion)
 
 BLEND_MODE (colorburn)
 
 BLEND_MODE (colordodge)
 
 BLEND_MODE (hardlight)
 
 BLEND_MODE (overlay)
 
 BLEND_MODE (softlight)
 
SI F sat (F r, F g, F b)
 
SI F lum (F r, F g, F b)
 
SI void set_sat (F *r, F *g, F *b, F s)
 
SI void set_lum (F *r, F *g, F *b, F l)
 
SI F clip_channel (F c, F l, I32 clip_low, I32 clip_high, F mn_scale, F mx_scale)
 
SI void clip_color (F *r, F *g, F *b, F a)
 
 STAGE (hue, NoCtx)
 
 STAGE (saturation, NoCtx)
 
 STAGE (color, NoCtx)
 
 STAGE (luminosity, NoCtx)
 
 STAGE (srcover_rgba_8888, const SkRasterPipeline_MemoryCtx *ctx)
 
SI F clamp_01_ (F v)
 
 STAGE (clamp_01, NoCtx)
 
 STAGE (clamp_gamut, NoCtx)
 
 STAGE (set_rgb, const float *rgb)
 
 STAGE (unbounded_set_rgb, const float *rgb)
 
 STAGE (swap_rb, NoCtx)
 
 STAGE (swap_rb_dst, NoCtx)
 
 STAGE (move_src_dst, NoCtx)
 
 STAGE (move_dst_src, NoCtx)
 
 STAGE (swap_src_dst, NoCtx)
 
 STAGE (premul, NoCtx)
 
 STAGE (premul_dst, NoCtx)
 
 STAGE (unpremul, NoCtx)
 
 STAGE (unpremul_polar, NoCtx)
 
 STAGE (force_opaque, NoCtx)
 
 STAGE (force_opaque_dst, NoCtx)
 
 STAGE (rgb_to_hsl, NoCtx)
 
 STAGE (hsl_to_rgb, NoCtx)
 
 STAGE (css_lab_to_xyz, NoCtx)
 
 STAGE (css_oklab_to_linear_srgb, NoCtx)
 
 STAGE (css_oklab_gamut_map_to_linear_srgb, NoCtx)
 
 STAGE (css_hcl_to_lab, NoCtx)
 
SI F mod_ (F x, float y)
 
SI RGB css_hsl_to_srgb_ (F h, F s, F l)
 
 STAGE (css_hsl_to_srgb, NoCtx)
 
 STAGE (css_hwb_to_srgb, NoCtx)
 
SI F alpha_coverage_from_rgb_coverage (F a, F da, F cr, F cg, F cb)
 
 STAGE (scale_1_float, const float *c)
 
 STAGE (scale_u8, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (scale_565, const SkRasterPipeline_MemoryCtx *ctx)
 
SI F lerp (F from, F to, F t)
 
 STAGE (lerp_1_float, const float *c)
 
 STAGE (scale_native, const float scales[])
 
 STAGE (lerp_native, const float scales[])
 
 STAGE (lerp_u8, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (lerp_565, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (emboss, const SkRasterPipeline_EmbossCtx *ctx)
 
 STAGE (byte_tables, const SkRasterPipeline_TablesCtx *tables)
 
SI F strip_sign (F x, U32 *sign)
 
SI F apply_sign (F x, U32 sign)
 
 STAGE (parametric, const skcms_TransferFunction *ctx)
 
 STAGE (gamma_, const float *G)
 
 STAGE (PQish, const skcms_TransferFunction *ctx)
 
 STAGE (HLGish, const skcms_TransferFunction *ctx)
 
 STAGE (HLGinvish, const skcms_TransferFunction *ctx)
 
 STAGE (load_a8, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_a8_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_a8, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_a8, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (store_r8, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_565, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_565_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_565, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_565, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_4444, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_4444_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_4444, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_4444, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_8888, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_8888_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_8888, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_8888, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_rg88, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_rg88_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_rg88, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_rg88, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_a16, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_a16_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_a16, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_a16, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_rg1616, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_rg1616_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_rg1616, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_rg1616, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_16161616, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_16161616_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_16161616, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_16161616, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_10x6, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_10x6_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_10x6, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_10x6, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_1010102, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_1010102_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_1010102_xr, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_1010102_xr_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_1010102, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (gather_1010102_xr, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (gather_10101010_xr, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (load_10101010_xr, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_10101010_xr_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (store_10101010_xr, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (store_1010102, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (store_1010102_xr, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_f16, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_f16_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_f16, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_f16, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_af16, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_af16_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_af16, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_af16, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_rgf16, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_rgf16_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_rgf16, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_rgf16, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_f32, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (load_f32_dst, const SkRasterPipeline_MemoryCtx *ctx)
 
 STAGE (gather_f32, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (store_f32, const SkRasterPipeline_MemoryCtx *ctx)
 
SI F exclusive_repeat (F v, const SkRasterPipeline_TileCtx *ctx)
 
SI F exclusive_mirror (F v, const SkRasterPipeline_TileCtx *ctx)
 
 STAGE (repeat_x, const SkRasterPipeline_TileCtx *ctx)
 
 STAGE (repeat_y, const SkRasterPipeline_TileCtx *ctx)
 
 STAGE (mirror_x, const SkRasterPipeline_TileCtx *ctx)
 
 STAGE (mirror_y, const SkRasterPipeline_TileCtx *ctx)
 
 STAGE (clamp_x_1, NoCtx)
 
 STAGE (repeat_x_1, NoCtx)
 
 STAGE (mirror_x_1, NoCtx)
 
 STAGE (clamp_x_and_y, const SkRasterPipeline_CoordClampCtx *ctx)
 
 STAGE (decal_x, SkRasterPipeline_DecalTileCtx *ctx)
 
 STAGE (decal_y, SkRasterPipeline_DecalTileCtx *ctx)
 
 STAGE (decal_x_and_y, SkRasterPipeline_DecalTileCtx *ctx)
 
 STAGE (check_decal_mask, SkRasterPipeline_DecalTileCtx *ctx)
 
 STAGE (alpha_to_gray, NoCtx)
 
 STAGE (alpha_to_gray_dst, NoCtx)
 
 STAGE (alpha_to_red, NoCtx)
 
 STAGE (alpha_to_red_dst, NoCtx)
 
 STAGE (bt709_luminance_or_luma_to_alpha, NoCtx)
 
 STAGE (bt709_luminance_or_luma_to_rgb, NoCtx)
 
 STAGE (matrix_translate, const float *m)
 
 STAGE (matrix_scale_translate, const float *m)
 
 STAGE (matrix_2x3, const float *m)
 
 STAGE (matrix_3x3, const float *m)
 
 STAGE (matrix_3x4, const float *m)
 
 STAGE (matrix_4x5, const float *m)
 
 STAGE (matrix_4x3, const float *m)
 
 STAGE (matrix_perspective, const float *m)
 
SI void gradient_lookup (const SkRasterPipeline_GradientCtx *c, U32 idx, F t, F *r, F *g, F *b, F *a)
 
 STAGE (evenly_spaced_gradient, const SkRasterPipeline_GradientCtx *c)
 
 STAGE (gradient, const SkRasterPipeline_GradientCtx *c)
 
 STAGE (evenly_spaced_2_stop_gradient, const SkRasterPipeline_EvenlySpaced2StopGradientCtx *c)
 
 STAGE (xy_to_unit_angle, NoCtx)
 
 STAGE (xy_to_radius, NoCtx)
 
 STAGE (negate_x, NoCtx)
 
 STAGE (xy_to_2pt_conical_strip, const SkRasterPipeline_2PtConicalCtx *ctx)
 
 STAGE (xy_to_2pt_conical_focal_on_circle, NoCtx)
 
 STAGE (xy_to_2pt_conical_well_behaved, const SkRasterPipeline_2PtConicalCtx *ctx)
 
 STAGE (xy_to_2pt_conical_greater, const SkRasterPipeline_2PtConicalCtx *ctx)
 
 STAGE (xy_to_2pt_conical_smaller, const SkRasterPipeline_2PtConicalCtx *ctx)
 
 STAGE (alter_2pt_conical_compensate_focal, const SkRasterPipeline_2PtConicalCtx *ctx)
 
 STAGE (alter_2pt_conical_unswap, NoCtx)
 
 STAGE (mask_2pt_conical_nan, SkRasterPipeline_2PtConicalCtx *c)
 
 STAGE (mask_2pt_conical_degenerates, SkRasterPipeline_2PtConicalCtx *c)
 
 STAGE (apply_vector_mask, const uint32_t *ctx)
 
SI void save_xy (F *r, F *g, SkRasterPipeline_SamplerCtx *c)
 
 STAGE (accumulate, const SkRasterPipeline_SamplerCtx *c)
 
template<int kScale>
SI void bilinear_x (SkRasterPipeline_SamplerCtx *ctx, F *x)
 
template<int kScale>
SI void bilinear_y (SkRasterPipeline_SamplerCtx *ctx, F *y)
 
 STAGE (bilinear_setup, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bilinear_nx, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bilinear_px, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bilinear_ny, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bilinear_py, SkRasterPipeline_SamplerCtx *ctx)
 
SI F bicubic_wts (F t, float A, float B, float C, float D)
 
template<int kScale>
SI void bicubic_x (SkRasterPipeline_SamplerCtx *ctx, F *x)
 
template<int kScale>
SI void bicubic_y (SkRasterPipeline_SamplerCtx *ctx, F *y)
 
 STAGE (bicubic_setup, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bicubic_n3x, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bicubic_n1x, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bicubic_p1x, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bicubic_p3x, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bicubic_n3y, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bicubic_n1y, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bicubic_p1y, SkRasterPipeline_SamplerCtx *ctx)
 
 STAGE (bicubic_p3y, SkRasterPipeline_SamplerCtx *ctx)
 
SI F compute_perlin_vector (U32 sample, F x, F y)
 
 STAGE (perlin_noise, SkRasterPipeline_PerlinNoiseCtx *ctx)
 
 STAGE (mipmap_linear_init, SkRasterPipeline_MipmapCtx *ctx)
 
 STAGE (mipmap_linear_update, SkRasterPipeline_MipmapCtx *ctx)
 
 STAGE (mipmap_linear_finish, SkRasterPipeline_MipmapCtx *ctx)
 
 STAGE (callback, SkRasterPipeline_CallbackCtx *c)
 
 STAGE_TAIL (set_base_pointer, std::byte *p)
 
 STAGE_TAIL (init_lane_masks, SkRasterPipeline_InitLaneMasksCtx *ctx)
 
 STAGE_TAIL (store_device_xy01, F *dst)
 
 STAGE_TAIL (exchange_src, F *rgba)
 
 STAGE_TAIL (load_condition_mask, F *ctx)
 
 STAGE_TAIL (store_condition_mask, F *ctx)
 
 STAGE_TAIL (merge_condition_mask, I32 *ptr)
 
 STAGE_TAIL (merge_inv_condition_mask, I32 *ptr)
 
 STAGE_TAIL (load_loop_mask, F *ctx)
 
 STAGE_TAIL (store_loop_mask, F *ctx)
 
 STAGE_TAIL (mask_off_loop_mask, NoCtx)
 
 STAGE_TAIL (reenable_loop_mask, I32 *ptr)
 
 STAGE_TAIL (merge_loop_mask, I32 *ptr)
 
 STAGE_TAIL (continue_op, I32 *continueMask)
 
 STAGE_TAIL (case_op, SkRasterPipeline_CaseOpCtx *packed)
 
 STAGE_TAIL (load_return_mask, F *ctx)
 
 STAGE_TAIL (store_return_mask, F *ctx)
 
 STAGE_TAIL (mask_off_return_mask, NoCtx)
 
 STAGE_BRANCH (branch_if_all_lanes_active, SkRasterPipeline_BranchIfAllLanesActiveCtx *ctx)
 
 STAGE_BRANCH (branch_if_any_lanes_active, SkRasterPipeline_BranchCtx *ctx)
 
 STAGE_BRANCH (branch_if_no_lanes_active, SkRasterPipeline_BranchCtx *ctx)
 
 STAGE_BRANCH (jump, SkRasterPipeline_BranchCtx *ctx)
 
 STAGE_BRANCH (branch_if_no_active_lanes_eq, SkRasterPipeline_BranchIfEqualCtx *ctx)
 
 STAGE_TAIL (trace_line, SkRasterPipeline_TraceLineCtx *ctx)
 
 STAGE_TAIL (trace_enter, SkRasterPipeline_TraceFuncCtx *ctx)
 
 STAGE_TAIL (trace_exit, SkRasterPipeline_TraceFuncCtx *ctx)
 
 STAGE_TAIL (trace_scope, SkRasterPipeline_TraceScopeCtx *ctx)
 
 STAGE_TAIL (trace_var, SkRasterPipeline_TraceVarCtx *ctx)
 
 STAGE_TAIL (copy_uniform, SkRasterPipeline_UniformCtx *ctx)
 
 STAGE_TAIL (copy_2_uniforms, SkRasterPipeline_UniformCtx *ctx)
 
 STAGE_TAIL (copy_3_uniforms, SkRasterPipeline_UniformCtx *ctx)
 
 STAGE_TAIL (copy_4_uniforms, SkRasterPipeline_UniformCtx *ctx)
 
 STAGE_TAIL (copy_constant, SkRasterPipeline_ConstantCtx *packed)
 
 STAGE_TAIL (splat_2_constants, SkRasterPipeline_ConstantCtx *packed)
 
 STAGE_TAIL (splat_3_constants, SkRasterPipeline_ConstantCtx *packed)
 
 STAGE_TAIL (splat_4_constants, SkRasterPipeline_ConstantCtx *packed)
 
template<int NumSlots>
SI void copy_n_slots_unmasked_fn (SkRasterPipeline_BinaryOpCtx *packed, std::byte *base)
 
 STAGE_TAIL (copy_slot_unmasked, SkRasterPipeline_BinaryOpCtx *packed)
 
 STAGE_TAIL (copy_2_slots_unmasked, SkRasterPipeline_BinaryOpCtx *packed)
 
 STAGE_TAIL (copy_3_slots_unmasked, SkRasterPipeline_BinaryOpCtx *packed)
 
 STAGE_TAIL (copy_4_slots_unmasked, SkRasterPipeline_BinaryOpCtx *packed)
 
template<int NumSlots>
SI void copy_n_immutable_unmasked_fn (SkRasterPipeline_BinaryOpCtx *packed, std::byte *base)
 
 STAGE_TAIL (copy_immutable_unmasked, SkRasterPipeline_BinaryOpCtx *packed)
 
 STAGE_TAIL (copy_2_immutables_unmasked, SkRasterPipeline_BinaryOpCtx *packed)
 
 STAGE_TAIL (copy_3_immutables_unmasked, SkRasterPipeline_BinaryOpCtx *packed)
 
 STAGE_TAIL (copy_4_immutables_unmasked, SkRasterPipeline_BinaryOpCtx *packed)
 
template<int NumSlots>
SI void copy_n_slots_masked_fn (SkRasterPipeline_BinaryOpCtx *packed, std::byte *base, I32 mask)
 
 STAGE_TAIL (copy_slot_masked, SkRasterPipeline_BinaryOpCtx *packed)
 
 STAGE_TAIL (copy_2_slots_masked, SkRasterPipeline_BinaryOpCtx *packed)
 
 STAGE_TAIL (copy_3_slots_masked, SkRasterPipeline_BinaryOpCtx *packed)
 
 STAGE_TAIL (copy_4_slots_masked, SkRasterPipeline_BinaryOpCtx *packed)
 
template<int LoopCount, typename OffsetType >
SI void shuffle_fn (std::byte *ptr, OffsetType *offsets, int numSlots)
 
template<int N>
SI void small_swizzle_fn (SkRasterPipeline_SwizzleCtx *packed, std::byte *base)
 
 STAGE_TAIL (swizzle_1, SkRasterPipeline_SwizzleCtx *packed)
 
 STAGE_TAIL (swizzle_2, SkRasterPipeline_SwizzleCtx *packed)
 
 STAGE_TAIL (swizzle_3, SkRasterPipeline_SwizzleCtx *packed)
 
 STAGE_TAIL (swizzle_4, SkRasterPipeline_SwizzleCtx *packed)
 
 STAGE_TAIL (shuffle, SkRasterPipeline_ShuffleCtx *ctx)
 
template<int NumSlots>
SI void swizzle_copy_masked_fn (I32 *dst, const I32 *src, uint16_t *offsets, I32 mask)
 
 STAGE_TAIL (swizzle_copy_slot_masked, SkRasterPipeline_SwizzleCopyCtx *ctx)
 
 STAGE_TAIL (swizzle_copy_2_slots_masked, SkRasterPipeline_SwizzleCopyCtx *ctx)
 
 STAGE_TAIL (swizzle_copy_3_slots_masked, SkRasterPipeline_SwizzleCopyCtx *ctx)
 
 STAGE_TAIL (swizzle_copy_4_slots_masked, SkRasterPipeline_SwizzleCopyCtx *ctx)
 
 STAGE_TAIL (copy_from_indirect_unmasked, SkRasterPipeline_CopyIndirectCtx *ctx)
 
 STAGE_TAIL (copy_from_indirect_uniform_unmasked, SkRasterPipeline_CopyIndirectCtx *ctx)
 
 STAGE_TAIL (copy_to_indirect_masked, SkRasterPipeline_CopyIndirectCtx *ctx)
 
 STAGE_TAIL (swizzle_copy_to_indirect_masked, SkRasterPipeline_SwizzleCopyIndirectCtx *ctx)
 
template<typename T , void(*)(T *) ApplyFn>
SI void apply_adjacent_unary (T *dst, T *end)
 
template<typename T >
SI void cast_to_float_from_fn (T *dst)
 
SI void cast_to_int_from_fn (F *dst)
 
SI void cast_to_uint_from_fn (F *dst)
 
SI void abs_fn (I32 *dst)
 
SI void floor_fn (F *dst)
 
SI void ceil_fn (F *dst)
 
SI void invsqrt_fn (F *dst)
 
 DECLARE_UNARY_INT (cast_to_float_from) DECLARE_UNARY_UINT(cast_to_float_from) STAGE_TAIL(sin_float
 
 STAGE_TAIL (cos_float, F *dst)
 
 STAGE_TAIL (tan_float, F *dst)
 
 STAGE_TAIL (asin_float, F *dst)
 
 STAGE_TAIL (acos_float, F *dst)
 
 STAGE_TAIL (atan_float, F *dst)
 
 STAGE_TAIL (sqrt_float, F *dst)
 
 STAGE_TAIL (exp_float, F *dst)
 
 STAGE_TAIL (exp2_float, F *dst)
 
 STAGE_TAIL (log_float, F *dst)
 
 STAGE_TAIL (log2_float, F *dst)
 
 STAGE_TAIL (inverse_mat2, F *dst)
 
 STAGE_TAIL (inverse_mat3, F *dst)
 
 STAGE_TAIL (inverse_mat4, F *dst)
 
template<typename T , void(*)(T *, T *) ApplyFn>
SI void apply_adjacent_binary (T *dst, T *src)
 
template<typename T , void(*)(T *, T *) ApplyFn>
SI void apply_adjacent_binary_packed (SkRasterPipeline_BinaryOpCtx *packed, std::byte *base)
 
template<int N, typename V , typename S , void(*)(V *, V *) ApplyFn>
SI void apply_binary_immediate (SkRasterPipeline_ConstantCtx *packed, std::byte *base)
 
template<typename T >
SI void add_fn (T *dst, T *src)
 
template<typename T >
SI void sub_fn (T *dst, T *src)
 
template<typename T >
SI void mul_fn (T *dst, T *src)
 
template<typename T >
SI void div_fn (T *dst, T *src)
 
SI void bitwise_and_fn (I32 *dst, I32 *src)
 
SI void bitwise_or_fn (I32 *dst, I32 *src)
 
SI void bitwise_xor_fn (I32 *dst, I32 *src)
 
template<typename T >
SI void max_fn (T *dst, T *src)
 
template<typename T >
SI void min_fn (T *dst, T *src)
 
template<typename T >
SI void cmplt_fn (T *dst, T *src)
 
template<typename T >
SI void cmple_fn (T *dst, T *src)
 
template<typename T >
SI void cmpeq_fn (T *dst, T *src)
 
template<typename T >
SI void cmpne_fn (T *dst, T *src)
 
SI void atan2_fn (F *dst, F *src)
 
SI void pow_fn (F *dst, F *src)
 
SI void mod_fn (F *dst, F *src)
 
 DECLARE_BINARY_FLOAT (add) DECLARE_BINARY_INT(add) DECLARE_BINARY_FLOAT(sub) DECLARE_BINARY_INT(sub) DECLARE_BINARY_FLOAT(mul) DECLARE_BINARY_INT(mul) DECLARE_BINARY_FLOAT(div) DECLARE_BINARY_INT(div) DECLARE_BINARY_UINT(div) DECLARE_BINARY_FLOAT(min) DECLARE_BINARY_INT(min) DECLARE_BINARY_UINT(min) DECLARE_BINARY_FLOAT(max) DECLARE_BINARY_INT(max) DECLARE_BINARY_UINT(max) DECLARE_BINARY_FLOAT(cmplt) DECLARE_BINARY_INT(cmplt) DECLARE_BINARY_UINT(cmplt) DECLARE_BINARY_FLOAT(cmple) DECLARE_BINARY_INT(cmple) DECLARE_BINARY_UINT(cmple) DECLARE_BINARY_FLOAT(cmpeq) DECLARE_BINARY_INT(cmpeq) DECLARE_BINARY_FLOAT(cmpne) DECLARE_BINARY_INT(cmpne) DECLARE_IMM_BINARY_FLOAT(add) DECLARE_IMM_BINARY_INT(add) DECLARE_IMM_BINARY_FLOAT(mul) DECLARE_IMM_BINARY_INT(mul) DECLARE_IMM_BINARY_FLOAT(cmplt) DECLARE_IMM_BINARY_INT(cmplt) DECLARE_IMM_BINARY_UINT(cmplt) DECLARE_IMM_BINARY_FLOAT(cmple) DECLARE_IMM_BINARY_INT(cmple) DECLARE_IMM_BINARY_UINT(cmple) DECLARE_IMM_BINARY_FLOAT(cmpeq) DECLARE_IMM_BINARY_INT(cmpeq) DECLARE_IMM_BINARY_FLOAT(cmpne) DECLARE_IMM_BINARY_INT(cmpne) STAGE_TAIL(dot_2_floats
 
 STAGE_TAIL (dot_3_floats, F *dst)
 
 STAGE_TAIL (dot_4_floats, F *dst)
 
template<int N>
SI void matrix_multiply (SkRasterPipeline_MatrixMultiplyCtx *packed, std::byte *base)
 
 STAGE_TAIL (matrix_multiply_2, SkRasterPipeline_MatrixMultiplyCtx *packed)
 
 STAGE_TAIL (matrix_multiply_3, SkRasterPipeline_MatrixMultiplyCtx *packed)
 
 STAGE_TAIL (matrix_multiply_4, SkRasterPipeline_MatrixMultiplyCtx *packed)
 
 STAGE_TAIL (refract_4_floats, F *dst)
 
template<typename T , void(*)(T *, T *, T *) ApplyFn>
SI void apply_adjacent_ternary (T *dst, T *src0, T *src1)
 
template<typename T , void(*)(T *, T *, T *) ApplyFn>
SI void apply_adjacent_ternary_packed (SkRasterPipeline_TernaryOpCtx *packed, std::byte *base)
 
SI void mix_fn (F *a, F *x, F *y)
 
SI void mix_fn (I32 *a, I32 *x, I32 *y)
 
SI void smoothstep_fn (F *edge0, F *edge1, F *x)
 
 STAGE (gauss_a_to_rgba, NoCtx)
 
 STAGE (bilerp_clamp_8888, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (bicubic_clamp_8888, const SkRasterPipeline_GatherCtx *ctx)
 
 STAGE (swizzle, void *ctx)
 
constexpr size_t raster_pipeline_lowp_stride ()
 
constexpr size_t raster_pipeline_highp_stride ()
 

Variables

static constexpr void(* S32_alpha_D32_filter_DXDY )(const SkBitmapProcState &, const uint32_t *, int, SkPMColor *) = nullptr
 
static constexpr F F0 = F_(0.0f)
 
static constexpr F F1 = F_(1.0f)
 
static constexpr size_t N = sizeof(F) / sizeof(float)
 
F  dst { *dst = sin_(*dst)
 

Typedef Documentation

◆ F

using SK_OPTS_NS::F = typedef float

Definition at line 134 of file SkRasterPipeline_opts.h.

◆ I32

using SK_OPTS_NS::I32 = typedef int32_t

Definition at line 135 of file SkRasterPipeline_opts.h.

◆ Stage

using SK_OPTS_NS::Stage = typedef void(ABI*)(Params*, SkRasterPipelineStage* program, F r, F g, F b, F a)

Definition at line 1524 of file SkRasterPipeline_opts.h.

◆ U16

using SK_OPTS_NS::U16 = typedef uint16_t

Definition at line 138 of file SkRasterPipeline_opts.h.

◆ U32

using SK_OPTS_NS::U32 = typedef uint32_t

Definition at line 137 of file SkRasterPipeline_opts.h.

◆ U64

using SK_OPTS_NS::U64 = typedef uint64_t

Definition at line 136 of file SkRasterPipeline_opts.h.

◆ U8

using SK_OPTS_NS::U8 = typedef uint8_t

Definition at line 139 of file SkRasterPipeline_opts.h.

Function Documentation

◆ abs_() [1/2]

SI F SK_OPTS_NS::abs_ ( F  v)

Definition at line 150 of file SkRasterPipeline_opts.h.

150{ return fabsf(v); }

◆ abs_() [2/2]

SI I32 SK_OPTS_NS::abs_ ( I32  v)

Definition at line 151 of file SkRasterPipeline_opts.h.

151{ return v < 0 ? -v : v; }

◆ abs_fn()

SI void SK_OPTS_NS::abs_fn ( I32 *  dst)

Definition at line 4398 of file SkRasterPipeline_opts.h.

4398 {
4399 *dst = abs_(*dst);
4400}
SI I32 abs_(I32 v)

◆ acos_()

SI F SK_OPTS_NS::acos_ ( F  x)

Definition at line 1957 of file SkRasterPipeline_opts.h.

1957 {
1958 return SK_FloatPI/2 - asin_(x);
1959}
constexpr float SK_FloatPI
double x

◆ add_fn()

template<typename T >
SI void SK_OPTS_NS::add_fn ( T *  dst,
T *  src 
)

Definition at line 4569 of file SkRasterPipeline_opts.h.

4569 {
4570 *dst += *src;
4571}

◆ all()

SI bool SK_OPTS_NS::all ( I32  c)

Definition at line 169 of file SkRasterPipeline_opts.h.

169{ return c != 0; }

◆ alpha_coverage_from_rgb_coverage()

SI F SK_OPTS_NS::alpha_coverage_from_rgb_coverage ( F  a,
F  da,
F  cr,
F  cg,
F  cb 
)

Definition at line 2615 of file SkRasterPipeline_opts.h.

2615 {
2616 return if_then_else(a < da, min(cr, min(cg,cb))
2617 , max(cr, max(cg,cb)));
2618}
SI T if_then_else(C cond, T t, T e)
struct MyStruct a[10]
static float max(float r, float g, float b)
Definition: hsl.cpp:49
static float min(float r, float g, float b)
Definition: hsl.cpp:48

◆ any()

SI bool SK_OPTS_NS::any ( I32  c)

Definition at line 168 of file SkRasterPipeline_opts.h.

168{ return c != 0; }

◆ apply_adjacent_binary()

template<typename T , void(*)(T *, T *) ApplyFn>
SI void SK_OPTS_NS::apply_adjacent_binary ( T *  dst,
T *  src 
)

Definition at line 4539 of file SkRasterPipeline_opts.h.

4539 {
4540 T* end = src;
4541 do {
4542 ApplyFn(dst, src);
4543 dst += 1;
4544 src += 1;
4545 } while (dst != end);
4546}
glong glong end
#define T
Definition: precompiler.cc:65

◆ apply_adjacent_binary_packed()

template<typename T , void(*)(T *, T *) ApplyFn>
SI void SK_OPTS_NS::apply_adjacent_binary_packed ( SkRasterPipeline_BinaryOpCtx *  packed,
std::byte *  base 
)

Definition at line 4549 of file SkRasterPipeline_opts.h.

4549 {
4550 auto ctx = SkRPCtxUtils::Unpack(packed);
4551 std::byte* dst = base + ctx.dst;
4552 std::byte* src = base + ctx.src;
4553 apply_adjacent_binary<T, ApplyFn>((T*)dst, (T*)src);
4554}
static UnpackedType< T > Unpack(const T *ctx)

◆ apply_adjacent_ternary()

template<typename T , void(*)(T *, T *, T *) ApplyFn>
SI void SK_OPTS_NS::apply_adjacent_ternary ( T *  dst,
T *  src0,
T *  src1 
)

Definition at line 4868 of file SkRasterPipeline_opts.h.

4868 {
4869 int count = src0 - dst;
4870#if !defined(JUMPER_IS_SCALAR)
4871 SK_ASSUME(count >= 1);
4872#endif
4873
4874 for (int index = 0; index < count; ++index) {
4875 ApplyFn(dst, src0, src1);
4876 dst += 1;
4877 src0 += 1;
4878 src1 += 1;
4879 }
4880}
int count
Definition: FontMgrTest.cpp:50
#define SK_ASSUME(cond)
Definition: SkAssert.h:44

◆ apply_adjacent_ternary_packed()

template<typename T , void(*)(T *, T *, T *) ApplyFn>
SI void SK_OPTS_NS::apply_adjacent_ternary_packed ( SkRasterPipeline_TernaryOpCtx *  packed,
std::byte *  base 
)

Definition at line 4883 of file SkRasterPipeline_opts.h.

4883 {
4884 auto ctx = SkRPCtxUtils::Unpack(packed);
4885 std::byte* dst = base + ctx.dst;
4886 std::byte* src0 = dst + ctx.delta;
4887 std::byte* src1 = src0 + ctx.delta;
4888 apply_adjacent_ternary<T, ApplyFn>((T*)dst, (T*)src0, (T*)src1);
4889}

◆ apply_adjacent_unary()

template<typename T , void(*)(T *) ApplyFn>
SI void SK_OPTS_NS::apply_adjacent_unary ( T *  dst,
T *  end 
)

Definition at line 4367 of file SkRasterPipeline_opts.h.

4367 {
4368 do {
4369 ApplyFn(dst);
4370 dst += 1;
4371 } while (dst != end);
4372}

◆ apply_binary_immediate()

template<int N, typename V , typename S , void(*)(V *, V *) ApplyFn>
SI void SK_OPTS_NS::apply_binary_immediate ( SkRasterPipeline_ConstantCtx *  packed,
std::byte *  base 
)

Definition at line 4557 of file SkRasterPipeline_opts.h.

4557 {
4558 auto ctx = SkRPCtxUtils::Unpack(packed);
4559 V* dst = (V*)(base + ctx.dst); // get a pointer to the destination
4560 S scalar = sk_bit_cast<S>(ctx.value); // bit-pun the constant value as desired
4561 V src = scalar - V(); // broadcast the constant value into a vector
4562 SK_UNROLL for (int index = 0; index < N; ++index) {
4563 ApplyFn(dst, &src); // perform the operation
4564 dst += 1;
4565 }
4566}
#define SK_UNROLL
T __attribute__((ext_vector_type(N))) V
static constexpr size_t N
#define V(name)
Definition: raw_object.h:125

◆ apply_sign()

SI F SK_OPTS_NS::apply_sign ( F  x,
U32  sign 
)

Definition at line 2725 of file SkRasterPipeline_opts.h.

2725 {
2726 return sk_bit_cast<F>(sign | sk_bit_cast<U32>(x));
2727}
static int sign(SkScalar x)
Definition: SkPath.cpp:2205

◆ approx_atan_unit()

SI F SK_OPTS_NS::approx_atan_unit ( F  x)

Definition at line 1915 of file SkRasterPipeline_opts.h.

1915 {
1916 // y = 0.14130025741326729 x⁴
1917 // - 0.34312835980675116 x³
1918 // - 0.016172900528248768 x²
1919 // + 1.00376969762003850 x
1920 // - 0.00014758242182738969
1921 const float c4 = 0.14130025741326729f;
1922 const float c3 = -0.34312835980675116f;
1923 const float c2 = -0.016172900528248768f;
1924 const float c1 = 1.0037696976200385f;
1925 const float c0 = -0.00014758242182738969f;
1926 return mad(x, mad(x, mad(x, mad(x, c4, c3), c2), c1), c0);
1927}
SI F mad(F f, F m, F a)

◆ approx_exp()

SI F SK_OPTS_NS::approx_exp ( F  x)

Definition at line 1386 of file SkRasterPipeline_opts.h.

1386 {
1387 const float log2_e = 1.4426950408889634074f;
1388 return approx_pow2(log2_e * x);
1389}

◆ approx_log()

SI F SK_OPTS_NS::approx_log ( F  x)

Definition at line 1369 of file SkRasterPipeline_opts.h.

1369 {
1370 const float ln2 = 0.69314718f;
1371 return ln2 * approx_log2(x);
1372}
SI F approx_log2(F x)

◆ approx_log2()

SI F SK_OPTS_NS::approx_log2 ( F  x)

Definition at line 1359 of file SkRasterPipeline_opts.h.

1359 {
1360 // e - 127 is a fair approximation of log2(x) in its own right...
1361 F e = cast(sk_bit_cast<U32>(x)) * (1.0f / (1<<23));
1362
1363 // ... but using the mantissa to refine its error is _much_ better.
1364 F m = sk_bit_cast<F>((sk_bit_cast<U32>(x) & 0x007fffff) | 0x3f000000);
1365
1366 return nmad(m, 1.498030302f, e - 124.225514990f) - 1.725879990f / (0.3520887068f + m);
1367}
SI D cast(const S &v)
SI F nmad(F f, F m, F a)
Definition: SkMD5.cpp:120

◆ approx_pow2()

SI F SK_OPTS_NS::approx_pow2 ( F  x)

Definition at line 1374 of file SkRasterPipeline_opts.h.

1374 {
1375 constexpr float kInfinityBits = 0x7f800000;
1376
1377 F f = fract(x);
1378 F approx = nmad(f, 1.490129070f, x + 121.274057500f);
1379 approx += 27.728023300f / (4.84252568f - f);
1380 approx *= 1.0f * (1<<23);
1381 approx = min(max(approx, F0), F_(kInfinityBits)); // guard against underflow/overflow
1382
1383 return sk_bit_cast<F>(round(approx));
1384}
static void round(SkPoint *p)
constexpr auto F_
static constexpr F F0
Definition: Transform_inl.h:27

◆ approx_powf()

SI F SK_OPTS_NS::approx_powf ( F  x,
F  y 
)

Definition at line 1391 of file SkRasterPipeline_opts.h.

1391 {
1392 return if_then_else((x == 0)|(x == 1), x
1393 , approx_pow2(approx_log2(x) * y));
1394}
double y

◆ asin_()

SI F SK_OPTS_NS::asin_ ( F  x)

Definition at line 1944 of file SkRasterPipeline_opts.h.

1944 {
1945 I32 neg = (x < 0.0f);
1946 x = if_then_else(neg, -x, x);
1947 const float c3 = -0.0187293f;
1948 const float c2 = 0.0742610f;
1949 const float c1 = -0.2121144f;
1950 const float c0 = 1.5707288f;
1951 F poly = mad(x, mad(x, mad(x, c3, c2), c1), c0);
1952 x = nmad(sqrt_(1 - x), poly, SK_FloatPI/2);
1953 x = if_then_else(neg, -x, x);
1954 return x;
1955}

◆ atan2_()

SI F SK_OPTS_NS::atan2_ ( F  y0,
F  x0 
)

Definition at line 1965 of file SkRasterPipeline_opts.h.

1965 {
1966 I32 flip = (abs_(y0) > abs_(x0));
1967 F y = if_then_else(flip, x0, y0);
1968 F x = if_then_else(flip, y0, x0);
1969 F arg = y/x;
1970
1971 I32 neg = (arg < 0.0f);
1972 arg = if_then_else(neg, -arg, arg);
1973
1974 F r = approx_atan_unit(arg);
1975 r = if_then_else(flip, SK_FloatPI/2 - r, r);
1976 r = if_then_else(neg, -r, r);
1977
1978 // handle quadrant distinctions
1979 r = if_then_else((y0 >= 0) & (x0 < 0), r + SK_FloatPI, r);
1980 r = if_then_else((y0 < 0) & (x0 <= 0), r - SK_FloatPI, r);
1981 // Note: we don't try to handle 0,0 or infinities
1982 return r;
1983}
SI F approx_atan_unit(F x)

◆ atan2_fn()

SI void SK_OPTS_NS::atan2_fn ( F *  dst,
F *  src 
)

Definition at line 4643 of file SkRasterPipeline_opts.h.

4643 {
4644 *dst = atan2_(*dst, *src);
4645}
SI F atan2_(F y0, F x0)

◆ atan_()

SI F SK_OPTS_NS::atan_ ( F  x)

Definition at line 1930 of file SkRasterPipeline_opts.h.

1930 {
1931 I32 neg = (x < 0.0f);
1932 x = if_then_else(neg, -x, x);
1933 I32 flip = (x > 1.0f);
1934 x = if_then_else(flip, 1/x, x);
1935 x = approx_atan_unit(x);
1936 x = if_then_else(flip, SK_FloatPI/2 - x, x);
1937 x = if_then_else(neg, -x, x);
1938 return x;
1939}

◆ bicubic_wts()

SI F SK_OPTS_NS::bicubic_wts ( F  t,
float  A,
float  B,
float  C,
float  D 
)

Definition at line 3633 of file SkRasterPipeline_opts.h.

3633 {
3634 return mad(t, mad(t, mad(t, D, C), B), A);
3635}

◆ bicubic_x()

template<int kScale>
SI void SK_OPTS_NS::bicubic_x ( SkRasterPipeline_SamplerCtx *  ctx,
F *  x 
)

Definition at line 3638 of file SkRasterPipeline_opts.h.

3638 {
3639 *x = sk_unaligned_load<F>(ctx->x) + (kScale * 0.5f);
3640
3641 F scalex;
3642 if (kScale == -3) { scalex = sk_unaligned_load<F>(ctx->wx[0]); }
3643 if (kScale == -1) { scalex = sk_unaligned_load<F>(ctx->wx[1]); }
3644 if (kScale == +1) { scalex = sk_unaligned_load<F>(ctx->wx[2]); }
3645 if (kScale == +3) { scalex = sk_unaligned_load<F>(ctx->wx[3]); }
3646 sk_unaligned_store(ctx->scalex, scalex);
3647}
static SK_ALWAYS_INLINE void SK_FP_SAFE_ABI sk_unaligned_store(P *ptr, T val)
Definition: SkUtils.h:61
float scalex[SkRasterPipeline_kMaxStride_highp]
float x[SkRasterPipeline_kMaxStride_highp]
float wx[4][SkRasterPipeline_kMaxStride_highp]

◆ bicubic_y()

template<int kScale>
SI void SK_OPTS_NS::bicubic_y ( SkRasterPipeline_SamplerCtx *  ctx,
F *  y 
)

Definition at line 3649 of file SkRasterPipeline_opts.h.

3649 {
3650 *y = sk_unaligned_load<F>(ctx->y) + (kScale * 0.5f);
3651
3652 F scaley;
3653 if (kScale == -3) { scaley = sk_unaligned_load<F>(ctx->wy[0]); }
3654 if (kScale == -1) { scaley = sk_unaligned_load<F>(ctx->wy[1]); }
3655 if (kScale == +1) { scaley = sk_unaligned_load<F>(ctx->wy[2]); }
3656 if (kScale == +3) { scaley = sk_unaligned_load<F>(ctx->wy[3]); }
3657 sk_unaligned_store(ctx->scaley, scaley);
3658}
float wy[4][SkRasterPipeline_kMaxStride_highp]
float scaley[SkRasterPipeline_kMaxStride_highp]
float y[SkRasterPipeline_kMaxStride_highp]

◆ bilinear_x()

template<int kScale>
SI void SK_OPTS_NS::bilinear_x ( SkRasterPipeline_SamplerCtx *  ctx,
F *  x 
)

Definition at line 3595 of file SkRasterPipeline_opts.h.

3595 {
3596 *x = sk_unaligned_load<F>(ctx->x) + (kScale * 0.5f);
3597 F fx = sk_unaligned_load<F>(ctx->fx);
3598
3599 F scalex;
3600 if (kScale == -1) { scalex = 1.0f - fx; }
3601 if (kScale == +1) { scalex = fx; }
3602 sk_unaligned_store(ctx->scalex, scalex);
3603}
float fx[SkRasterPipeline_kMaxStride_highp]

◆ bilinear_y()

template<int kScale>
SI void SK_OPTS_NS::bilinear_y ( SkRasterPipeline_SamplerCtx *  ctx,
F *  y 
)

Definition at line 3605 of file SkRasterPipeline_opts.h.

3605 {
3606 *y = sk_unaligned_load<F>(ctx->y) + (kScale * 0.5f);
3607 F fy = sk_unaligned_load<F>(ctx->fy);
3608
3609 F scaley;
3610 if (kScale == -1) { scaley = 1.0f - fy; }
3611 if (kScale == +1) { scaley = fy; }
3612 sk_unaligned_store(ctx->scaley, scaley);
3613}
float fy[SkRasterPipeline_kMaxStride_highp]

◆ bitwise_and_fn()

SI void SK_OPTS_NS::bitwise_and_fn ( I32 *  dst,
I32 *  src 
)

Definition at line 4593 of file SkRasterPipeline_opts.h.

4593 {
4594 *dst &= *src;
4595}

◆ bitwise_or_fn()

SI void SK_OPTS_NS::bitwise_or_fn ( I32 *  dst,
I32 *  src 
)

Definition at line 4597 of file SkRasterPipeline_opts.h.

4597 {
4598 *dst |= *src;
4599}

◆ bitwise_xor_fn()

SI void SK_OPTS_NS::bitwise_xor_fn ( I32 *  dst,
I32 *  src 
)

Definition at line 4601 of file SkRasterPipeline_opts.h.

4601 {
4602 *dst ^= *src;
4603}

◆ BLEND_MODE() [1/23]

SK_OPTS_NS::BLEND_MODE ( clear  )

Definition at line 2174 of file SkRasterPipeline_opts.h.

2174{ return F0; }

◆ BLEND_MODE() [2/23]

SK_OPTS_NS::BLEND_MODE ( colorburn  )

Definition at line 2207 of file SkRasterPipeline_opts.h.

2207 {
2208 return if_then_else(d == da, d + s*inv(da),
2209 if_then_else(s == 0, /* s + */ d*inv(sa),
2210 sa*(da - min(da, (da-d)*sa*rcp_fast(s))) + s*inv(da) + d*inv(sa)));
2211}
VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE auto & d
Definition: main.cc:19
struct MyStruct s

◆ BLEND_MODE() [3/23]

SK_OPTS_NS::BLEND_MODE ( colordodge  )

Definition at line 2212 of file SkRasterPipeline_opts.h.

2212 {
2213 return if_then_else(d == 0, /* d + */ s*inv(da),
2214 if_then_else(s == sa, s + d*inv(sa),
2215 sa*min(da, (d*sa)*rcp_fast(sa - s)) + s*inv(da) + d*inv(sa)));
2216}

◆ BLEND_MODE() [4/23]

SK_OPTS_NS::BLEND_MODE ( darken  )

Definition at line 2202 of file SkRasterPipeline_opts.h.

2202{ return s + d - max(s*da, d*sa) ; }

◆ BLEND_MODE() [5/23]

SK_OPTS_NS::BLEND_MODE ( difference  )

Definition at line 2204 of file SkRasterPipeline_opts.h.

2204{ return s + d - two(min(s*da, d*sa)); }

◆ BLEND_MODE() [6/23]

SK_OPTS_NS::BLEND_MODE ( dstatop  )

Definition at line 2176 of file SkRasterPipeline_opts.h.

2176{ return mad(d, sa, s*inv(da)); }

◆ BLEND_MODE() [7/23]

SK_OPTS_NS::BLEND_MODE ( dstin  )

Definition at line 2178 of file SkRasterPipeline_opts.h.

2178{ return d * sa; }

◆ BLEND_MODE() [8/23]

SK_OPTS_NS::BLEND_MODE ( dstout  )

Definition at line 2180 of file SkRasterPipeline_opts.h.

2180{ return d * inv(sa); }

◆ BLEND_MODE() [9/23]

SK_OPTS_NS::BLEND_MODE ( dstover  )

Definition at line 2182 of file SkRasterPipeline_opts.h.

2182{ return mad(s, inv(da), d); }

◆ BLEND_MODE() [10/23]

SK_OPTS_NS::BLEND_MODE ( exclusion  )

Definition at line 2205 of file SkRasterPipeline_opts.h.

2205{ return s + d - two(s*d); }

◆ BLEND_MODE() [11/23]

SK_OPTS_NS::BLEND_MODE ( hardlight  )

Definition at line 2217 of file SkRasterPipeline_opts.h.

2217 {
2218 return s*inv(da) + d*inv(sa)
2219 + if_then_else(two(s) <= sa, two(s*d), sa*da - two((da-d)*(sa-s)));
2220}

◆ BLEND_MODE() [12/23]

SK_OPTS_NS::BLEND_MODE ( lighten  )

Definition at line 2203 of file SkRasterPipeline_opts.h.

2203{ return s + d - min(s*da, d*sa) ; }

◆ BLEND_MODE() [13/23]

SK_OPTS_NS::BLEND_MODE ( modulate  )

Definition at line 2184 of file SkRasterPipeline_opts.h.

2184{ return s*d; }

◆ BLEND_MODE() [14/23]

SK_OPTS_NS::BLEND_MODE ( multiply  )

Definition at line 2185 of file SkRasterPipeline_opts.h.

2185{ return mad(s, d, mad(s, inv(da), d*inv(sa))); }

◆ BLEND_MODE() [15/23]

SK_OPTS_NS::BLEND_MODE ( overlay  )

Definition at line 2221 of file SkRasterPipeline_opts.h.

2221 {
2222 return s*inv(da) + d*inv(sa)
2223 + if_then_else(two(d) <= da, two(s*d), sa*da - two((da-d)*(sa-s)));
2224}

◆ BLEND_MODE() [16/23]

SK_OPTS_NS::BLEND_MODE ( plus_  )

Definition at line 2186 of file SkRasterPipeline_opts.h.

2186{ return min(s + d, 1.0f); } // We can clamp to either 1 or sa.

◆ BLEND_MODE() [17/23]

SK_OPTS_NS::BLEND_MODE ( screen  )

Definition at line 2187 of file SkRasterPipeline_opts.h.

2187{ return nmad(s, d, s + d); }

◆ BLEND_MODE() [18/23]

SK_OPTS_NS::BLEND_MODE ( softlight  )

Definition at line 2226 of file SkRasterPipeline_opts.h.

2226 {
2227 F m = if_then_else(da > 0, d / da, 0.0f),
2228 s2 = two(s),
2229 m4 = two(two(m));
2230
2231 // The logic forks three ways:
2232 // 1. dark src?
2233 // 2. light src, dark dst?
2234 // 3. light src, light dst?
2235 F darkSrc = d*(sa + (s2 - sa)*(1.0f - m)), // Used in case 1.
2236 darkDst = (m4*m4 + m4)*(m - 1.0f) + 7.0f*m, // Used in case 2.
2237 liteDst = sqrt_(m) - m,
2238 liteSrc = d*sa + da*(s2 - sa) * if_then_else(two(two(d)) <= da, darkDst, liteDst); // 2 or 3?
2239 return s*inv(da) + d*inv(sa) + if_then_else(s2 <= sa, darkSrc, liteSrc); // 1 or (2 or 3)?
2240}

◆ BLEND_MODE() [19/23]

SK_OPTS_NS::BLEND_MODE ( srcatop  )

Definition at line 2175 of file SkRasterPipeline_opts.h.

2175{ return mad(s, da, d*inv(sa)); }

◆ BLEND_MODE() [20/23]

SK_OPTS_NS::BLEND_MODE ( srcin  )

Definition at line 2177 of file SkRasterPipeline_opts.h.

2177{ return s * da; }

◆ BLEND_MODE() [21/23]

SK_OPTS_NS::BLEND_MODE ( srcout  )

Definition at line 2179 of file SkRasterPipeline_opts.h.

2179{ return s * inv(da); }

◆ BLEND_MODE() [22/23]

SK_OPTS_NS::BLEND_MODE ( srcover  )

Definition at line 2181 of file SkRasterPipeline_opts.h.

2181{ return mad(d, inv(sa), s); }

◆ BLEND_MODE() [23/23]

SK_OPTS_NS::BLEND_MODE ( xor_  )

Definition at line 2188 of file SkRasterPipeline_opts.h.

2188{ return mad(s, inv(da), d*inv(sa)); }

◆ blit_mask_d32_a8()

void SK_OPTS_NS::blit_mask_d32_a8 ( SkPMColor *  dst,
size_t  dstRB,
const SkAlpha *  mask,
size_t  maskRB,
SkColor  color,
int  w,
int  h 
)
inline

Definition at line 400 of file SkBlitMask_opts.h.

402 {
403 if (color == SK_ColorBLACK) {
404 blit_mask_d32_a8_black(dst, dstRB, mask, maskRB, w, h);
405 } else if (SkColorGetA(color) == 0xFF) {
406 blit_mask_d32_a8_opaque(dst, dstRB, mask, maskRB, color, w, h);
407 } else {
408 blit_mask_d32_a8_general(dst, dstRB, mask, maskRB, color, w, h);
409 }
410}
constexpr SkColor SK_ColorBLACK
Definition: SkColor.h:103
#define SkColorGetA(color)
Definition: SkColor.h:61
DlColor color
static void blit_mask_d32_a8_opaque(SkPMColor *dst, size_t dstRB, const SkAlpha *mask, size_t maskRB, SkColor color, int w, int h)
static void blit_mask_d32_a8_general(SkPMColor *dst, size_t dstRB, const SkAlpha *mask, size_t maskRB, SkColor color, int w, int h)
static void blit_mask_d32_a8_black(SkPMColor *dst, size_t dstRB, const SkAlpha *maskPtr, size_t maskRB, int width, int height)
dst
Definition: cp.py:12
SkScalar w
SkScalar h

◆ blit_mask_d32_a8_black()

static void SK_OPTS_NS::blit_mask_d32_a8_black ( SkPMColor *  dst,
size_t  dstRB,
const SkAlpha *  maskPtr,
size_t  maskRB,
int  width,
int  height 
)
static

Definition at line 275 of file SkBlitMask_opts.h.

277 {
279 const uint8_t* SK_RESTRICT mask = (const uint8_t*)maskPtr;
280
281 // Nine patch may set maskRB to 0 to blit the same row repeatedly.
282 ptrdiff_t mask_adjust = (ptrdiff_t)maskRB - width;
283 dstRB -= (width << 2);
284 const __m128i zeros = __lsx_vldi(0);
285 __m128i planar = __lsx_vldi(0);
286 planar = __lsx_vinsgr2vr_d(planar, 0x0d0905010c080400, 0);
287 planar = __lsx_vinsgr2vr_d(planar, 0x0f0b07030e0a0602, 1);
288
289 do {
290 int w = width;
291 while (w >= 8) {
292 __m128i vmask = __lsx_vld(mask, 0);
293 vmask = __lsx_vilvl_b(zeros, vmask);
294 __m128i vscale = __lsx_vsub_h(__lsx_vreplgr2vr_h(256), vmask);
295 __m128i lo = __lsx_vld(device, 0); // bgra bgra bgra bgra
296 __m128i hi = __lsx_vld(device, 16); // BGRA BGRA BGRA BGRA
297 lo = __lsx_vshuf_b(zeros, lo, planar); // bbbb gggg rrrr aaaa
298 hi = __lsx_vshuf_b(zeros, hi, planar); // BBBB GGGG RRRR AAAA
299 __m128i bg = __lsx_vilvl_w(hi, lo), // bbbb BBBB gggg GGGG
300 ra = __lsx_vilvh_w(hi, lo); // rrrr RRRR aaaa AAAA
301
302 __m128i b = __lsx_vilvl_b(zeros, bg), // _b_b _b_b _B_B _B_B
303 g = __lsx_vilvh_b(zeros, bg), // _g_g _g_g _G_G _G_G
304 r = __lsx_vilvl_b(zeros, ra), // _r_r _r_r _R_R _R_R
305 a = __lsx_vilvh_b(zeros, ra); // _a_a _a_a _A_A _A_A
306
307 b = SkAlphaMul_lsx(b, vscale);
308 g = SkAlphaMul_lsx(g, vscale);
309 r = SkAlphaMul_lsx(r, vscale);
310 a = SkAlphaMul_lsx(a, vscale);
311
312 a += vmask;
313
314 bg = __lsx_vor_v(b, __lsx_vslli_h(g, 8)); // bgbg bgbg BGBG BGBG
315 ra = __lsx_vor_v(r, __lsx_vslli_h(a, 8)); // rara rara RARA RARA
316 lo = __lsx_vilvl_h(ra, bg); // bgra bgra bgra bgra
317 hi = __lsx_vilvh_h(ra, bg); // BGRA BGRA BGRA BGRA
318
319 __lsx_vst(lo, device, 0);
320 __lsx_vst(hi, device, 16);
321
322 mask += 8;
323 device += 8;
324 w -= 8;
325 }
326
327 while (w-- > 0) {
328 unsigned aa = *mask++;
329 *device = (aa << SK_A32_SHIFT)
330 + SkAlphaMulQ(*device, SkAlpha255To256(255 - aa));
331 device += 1;
332 }
333
334 device = (uint32_t*)((char*)device + dstRB);
335 mask += mask_adjust;
336
337 } while (--height != 0);
338 }
static SK_ALWAYS_INLINE uint32_t SkAlphaMulQ(uint32_t c, unsigned scale)
Definition: SkColorPriv.h:142
static unsigned SkAlpha255To256(U8CPU alpha)
Definition: SkColorPriv.h:24
uint32_t SkPMColor
Definition: SkColor.h:205
#define SK_RESTRICT
Definition: SkFeatures.h:42
#define SK_A32_SHIFT
Definition: SkTypes.h:54
VkDevice device
Definition: main.cc:53
static bool b
static __m128i SkAlphaMul_lsx(__m128i x, __m128i y)
int32_t height
int32_t width

◆ blit_mask_d32_a8_general()

static void SK_OPTS_NS::blit_mask_d32_a8_general ( SkPMColor *  dst,
size_t  dstRB,
const SkAlpha *  mask,
size_t  maskRB,
SkColor  color,
int  w,
int  h 
)
static

Definition at line 262 of file SkBlitMask_opts.h.

264 {
265 D32_A8_Opaque_Color_lsx<true>(dst, dstRB, mask, maskRB, color, w, h);
266 }

◆ blit_mask_d32_a8_opaque()

static void SK_OPTS_NS::blit_mask_d32_a8_opaque ( SkPMColor *  dst,
size_t  dstRB,
const SkAlpha *  mask,
size_t  maskRB,
SkColor  color,
int  w,
int  h 
)
static

Definition at line 268 of file SkBlitMask_opts.h.

270 {
271 D32_A8_Opaque_Color_lsx<false>(dst, dstRB, mask, maskRB, color, w, h);
272 }

◆ blit_row_color32()

void SK_OPTS_NS::blit_row_color32 ( SkPMColor *  dst,
int  count,
SkPMColor  color 
)
inline

Definition at line 243 of file SkBlitRow_opts.h.

243 {
244 constexpr int N = 4; // 8, 16 also reasonable choices
248
249 auto kernel = [color](U32 src) {
250 unsigned invA = 255 - SkGetPackedA32(color);
251 invA += invA >> 7;
252 SkASSERT(0 < invA && invA < 256); // We handle alpha == 0 or alpha == 255 specially.
253
254 // (src * invA + (color << 8) + 128) >> 8
255 // Should all fit in 16 bits.
256 U8 s = sk_bit_cast<U8>(src),
257 a = U8(invA);
258 U16 c = skvx::cast<uint16_t>(sk_bit_cast<U8>(U32(color))),
259 d = (mull(s,a) + (c << 8) + 128)>>8;
260 return sk_bit_cast<U32>(skvx::cast<uint8_t>(d));
261 };
262
263 while (count >= N) {
264 kernel(U32::Load(dst)).store(dst);
265 dst += N;
266 count -= N;
267 }
268 while (count --> 0) {
269 *dst = kernel(U32{*dst})[0];
270 dst++;
271 }
272}
#define SkASSERT(cond)
Definition: SkAssert.h:116
#define SkGetPackedA32(packed)
Definition: SkColorPriv.h:92
V< uint8_t > U8
Definition: Transform_inl.h:19
V< uint32_t > U32
Definition: Transform_inl.h:17
#define N
Definition: beziers.cpp:19
SIN Vec< N, uint16_t > mull(const Vec< N, uint8_t > &x, const Vec< N, uint8_t > &y)
Definition: SkVx.h:906
Definition: SkVx.h:83

◆ blit_row_s32a_opaque()

void SK_OPTS_NS::blit_row_s32a_opaque ( SkPMColor *  dst,
const SkPMColor *  src,
int  len,
U8CPU  alpha 
)
inline

Definition at line 164 of file SkBlitRow_opts.h.

164 {
165 SkASSERT(alpha == 0xFF);
167
168#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
169 while (len >= 8) {
170 _mm256_storeu_si256((__m256i*)dst,
171 SkPMSrcOver_AVX2(_mm256_loadu_si256((const __m256i*)src),
172 _mm256_loadu_si256((const __m256i*)dst)));
173 src += 8;
174 dst += 8;
175 len -= 8;
176 }
177#endif
178
179#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
180 while (len >= 4) {
181 _mm_storeu_si128((__m128i*)dst, SkPMSrcOver_SSE2(_mm_loadu_si128((const __m128i*)src),
182 _mm_loadu_si128((const __m128i*)dst)));
183 src += 4;
184 dst += 4;
185 len -= 4;
186 }
187#endif
188
189#if defined(SK_ARM_HAS_NEON)
190 while (len >= 8) {
191 vst4_u8((uint8_t*)dst, SkPMSrcOver_neon8(vld4_u8((const uint8_t*)dst),
192 vld4_u8((const uint8_t*)src)));
193 src += 8;
194 dst += 8;
195 len -= 8;
196 }
197
198 while (len >= 2) {
199 vst1_u8((uint8_t*)dst, SkPMSrcOver_neon2(vld1_u8((const uint8_t*)dst),
200 vld1_u8((const uint8_t*)src)));
201 src += 2;
202 dst += 2;
203 len -= 2;
204 }
205
206 if (len != 0) {
207 uint8x8_t result = SkPMSrcOver_neon2(vcreate_u8((uint64_t)*dst),
208 vcreate_u8((uint64_t)*src));
209 vst1_lane_u32(dst, vreinterpret_u32_u8(result), 0);
210 }
211 return;
212#endif
213
214#if SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LASX
215 while (len >= 8) {
216 __lasx_xvst(SkPMSrcOver_LASX(__lasx_xvld(src, 0),
217 __lasx_xvld(dst, 0)), (__m256i*)dst, 0);
218 src += 8;
219 dst += 8;
220 len -= 8;
221 }
222#endif
223
224#if SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LSX
225 while (len >= 4) {
226 __lsx_vst(SkPMSrcOver_LSX(__lsx_vld(src, 0),
227 __lsx_vld(dst, 0)), (__m128i*)dst, 0);
228 src += 4;
229 dst += 4;
230 len -= 4;
231 }
232#endif
233
234 while (len --> 0) {
235 *dst = SkPMSrcOver(*src, *dst);
236 src++;
237 dst++;
238 }
239}
static __m128i SkPMSrcOver_LSX(const __m128i &src, const __m128i &dst)
static __m256i SkPMSrcOver_LASX(const __m256i &src, const __m256i &dst)
static __m128i SkPMSrcOver_SSE2(const __m128i &src, const __m128i &dst)
static __m256i SkPMSrcOver_AVX2(const __m256i &src, const __m256i &dst)
static SkPMColor SkPMSrcOver(SkPMColor src, SkPMColor dst)
Definition: SkColorPriv.h:150
static void sk_msan_assert_initialized(const void *begin, const void *end)
Definition: SkMSAN.h:24
GAsyncResult * result

◆ cast()

SI F SK_OPTS_NS::cast ( U32  v)

Definition at line 1337 of file SkRasterPipeline_opts.h.

1337{ return (F)v; }

◆ cast64()

SI F SK_OPTS_NS::cast64 ( U64  v)

Definition at line 1338 of file SkRasterPipeline_opts.h.

1338{ return (F)v; }

◆ cast_to_float_from_fn()

template<typename T >
SI void SK_OPTS_NS::cast_to_float_from_fn ( T *  dst)

Definition at line 4376 of file SkRasterPipeline_opts.h.

4376 {
4377 *dst = sk_bit_cast<T>((F)*dst);
4378}

◆ cast_to_int_from_fn()

SI void SK_OPTS_NS::cast_to_int_from_fn ( F *  dst)

Definition at line 4379 of file SkRasterPipeline_opts.h.

4379 {
4380 *dst = sk_bit_cast<F>((I32)*dst);
4381}

◆ cast_to_uint_from_fn()

SI void SK_OPTS_NS::cast_to_uint_from_fn ( F *  dst)

Definition at line 4382 of file SkRasterPipeline_opts.h.

4382 {
4383 *dst = sk_bit_cast<F>((U32)*dst);
4384}

◆ ceil_()

SI F SK_OPTS_NS::ceil_ ( F  v)

Definition at line 153 of file SkRasterPipeline_opts.h.

153{ return ceilf(v); }

◆ ceil_fn()

SI void SK_OPTS_NS::ceil_fn ( F *  dst)

Definition at line 4406 of file SkRasterPipeline_opts.h.

4406 {
4407 *dst = ceil_(*dst);
4408}

◆ clamp()

SI F SK_OPTS_NS::clamp ( F  v,
F  limit 
)

Definition at line 1825 of file SkRasterPipeline_opts.h.

1825 {
1826 F inclusive = sk_bit_cast<F>(sk_bit_cast<U32>(limit) - 1); // Exclusive -> inclusive.
1827 return min(max(0.0f, v), inclusive);
1828}

◆ clamp_01_()

SI F SK_OPTS_NS::clamp_01_ ( F  v)

Definition at line 2371 of file SkRasterPipeline_opts.h.

2371{ return min(max(0.0f, v), 1.0f); }

◆ clamp_ex()

SI F SK_OPTS_NS::clamp_ex ( F  v,
float  limit 
)

Definition at line 1831 of file SkRasterPipeline_opts.h.

1831 {
1832 const F inclusiveZ = F_(std::numeric_limits<float>::min()),
1833 inclusiveL = sk_bit_cast<F>( sk_bit_cast<U32>(F_(limit)) - 1 );
1834 return min(max(inclusiveZ, v), inclusiveL);
1835}

◆ clip_channel()

SI F SK_OPTS_NS::clip_channel ( F  c,
F  l,
I32  clip_low,
I32  clip_high,
F  mn_scale,
F  mx_scale 
)

Definition at line 2271 of file SkRasterPipeline_opts.h.

2271 {
2272 c = if_then_else(clip_low, mad(mn_scale, c - l, l), c);
2273 c = if_then_else(clip_high, mad(mx_scale, c - l, l), c);
2274 c = max(c, 0.0f); // Sometimes without this we may dip just a little negative.
2275 return c;
2276}

◆ clip_color()

SI void SK_OPTS_NS::clip_color ( F *  r,
F *  g,
F *  b,
F  a 
)

Definition at line 2277 of file SkRasterPipeline_opts.h.

2277 {
2278 F mn = min(*r, min(*g, *b)),
2279 mx = max(*r, max(*g, *b)),
2280 l = lum(*r, *g, *b),
2281 mn_scale = ( l) * rcp_fast(l - mn),
2282 mx_scale = (a - l) * rcp_fast(mx - l);
2283 I32 clip_low = cond_to_mask(mn < 0 && l != mn),
2284 clip_high = cond_to_mask(mx > a && l != mx);
2285
2286 *r = clip_channel(*r, l, clip_low, clip_high, mn_scale, mx_scale);
2287 *g = clip_channel(*g, l, clip_low, clip_high, mn_scale, mx_scale);
2288 *b = clip_channel(*b, l, clip_low, clip_high, mn_scale, mx_scale);
2289}
static float lum(float r, float g, float b)
Definition: hsl.cpp:52
SI F clip_channel(F c, F l, I32 clip_low, I32 clip_high, F mn_scale, F mx_scale)
SI I32 cond_to_mask(I32 cond)

◆ cmpeq_fn()

template<typename T >
SI void SK_OPTS_NS::cmpeq_fn ( T *  dst,
T *  src 
)

Definition at line 4630 of file SkRasterPipeline_opts.h.

4630 {
4631 static_assert(sizeof(T) == sizeof(I32));
4632 I32 result = cond_to_mask(*dst == *src);
4633 memcpy(dst, &result, sizeof(I32));
4634}

◆ cmple_fn()

template<typename T >
SI void SK_OPTS_NS::cmple_fn ( T *  dst,
T *  src 
)

Definition at line 4623 of file SkRasterPipeline_opts.h.

4623 {
4624 static_assert(sizeof(T) == sizeof(I32));
4625 I32 result = cond_to_mask(*dst <= *src);
4626 memcpy(dst, &result, sizeof(I32));
4627}

◆ cmplt_fn()

template<typename T >
SI void SK_OPTS_NS::cmplt_fn ( T *  dst,
T *  src 
)

Definition at line 4616 of file SkRasterPipeline_opts.h.

4616 {
4617 static_assert(sizeof(T) == sizeof(I32));
4618 I32 result = cond_to_mask(*dst < *src);
4619 memcpy(dst, &result, sizeof(I32));
4620}

◆ cmpne_fn()

template<typename T >
SI void SK_OPTS_NS::cmpne_fn ( T *  dst,
T *  src 
)

Definition at line 4637 of file SkRasterPipeline_opts.h.

4637 {
4638 static_assert(sizeof(T) == sizeof(I32));
4639 I32 result = cond_to_mask(*dst != *src);
4640 memcpy(dst, &result, sizeof(I32));
4641}

◆ compute_perlin_vector()

SI F SK_OPTS_NS::compute_perlin_vector ( U32  sample,
F  x,
F  y 
)

Definition at line 3691 of file SkRasterPipeline_opts.h.

3691 {
3692 // We're relying on the packing of uint16s within a uint32, which will vary based on endianness.
3693#ifdef SK_CPU_BENDIAN
3694 U32 sampleLo = sample >> 16;
3695 U32 sampleHi = sample & 0xFFFF;
3696#else
3697 U32 sampleLo = sample & 0xFFFF;
3698 U32 sampleHi = sample >> 16;
3699#endif
3700
3701 // Convert 32-bit sample value into two floats in the [-1..1] range.
3702 F vecX = mad(cast(sampleLo), 2.0f / 65535.0f, -1.0f);
3703 F vecY = mad(cast(sampleHi), 2.0f / 65535.0f, -1.0f);
3704
3705 // Return the dot of the sample and the passed-in vector.
3706 return mad(vecX, x,
3707 vecY * y);
3708}

◆ cond_to_mask()

SI I32 SK_OPTS_NS::cond_to_mask ( I32  cond)

Definition at line 2010 of file SkRasterPipeline_opts.h.

2010 {
2011#if defined(JUMPER_IS_SCALAR)
2012 // In scalar mode, conditions are bools (0 or 1), but we want to store and operate on masks
2013 // (eg, using bitwise operations to select values).
2014 return if_then_else(cond, I32(~0), I32(0));
2015#else
2016 // In SIMD mode, our various instruction sets already represent conditions as masks.
2017 return cond;
2018#endif
2019}

◆ copy_n_immutable_unmasked_fn()

template<int NumSlots>
SI void SK_OPTS_NS::copy_n_immutable_unmasked_fn ( SkRasterPipeline_BinaryOpCtx *  packed,
std::byte *  base 
)

Definition at line 4139 of file SkRasterPipeline_opts.h.

4139 {
4140 auto ctx = SkRPCtxUtils::Unpack(packed);
4141
4142 // Load the scalar values.
4143 float* src = (float*)(base + ctx.src);
4144 float values[NumSlots];
4145 SK_UNROLL for (int index = 0; index < NumSlots; ++index) {
4146 values[index] = src[index];
4147 }
4148 // Broadcast the scalars into the destination.
4149 F* dst = (F*)(base + ctx.dst);
4150 SK_UNROLL for (int index = 0; index < NumSlots; ++index) {
4151 dst[index] = F_(values[index]);
4152 }
4153}

◆ copy_n_slots_masked_fn()

template<int NumSlots>
SI void SK_OPTS_NS::copy_n_slots_masked_fn ( SkRasterPipeline_BinaryOpCtx *  packed,
std::byte *  base,
I32  mask 
)

Definition at line 4169 of file SkRasterPipeline_opts.h.

4169 {
4170 auto ctx = SkRPCtxUtils::Unpack(packed);
4171 I32* dst = (I32*)(base + ctx.dst);
4172 I32* src = (I32*)(base + ctx.src);
4173 SK_UNROLL for (int count = 0; count < NumSlots; ++count) {
4174 *dst = if_then_else(mask, *src, *dst);
4175 dst += 1;
4176 src += 1;
4177 }
4178}

◆ copy_n_slots_unmasked_fn()

template<int NumSlots>
SI void SK_OPTS_NS::copy_n_slots_unmasked_fn ( SkRasterPipeline_BinaryOpCtx *  packed,
std::byte *  base 
)

Definition at line 4118 of file SkRasterPipeline_opts.h.

4118 {
4119 auto ctx = SkRPCtxUtils::Unpack(packed);
4120 F* dst = (F*)(base + ctx.dst);
4121 F* src = (F*)(base + ctx.src);
4122 memcpy(dst, src, sizeof(F) * NumSlots);
4123}

◆ cos_()

SI F SK_OPTS_NS::cos_ ( F  x)

Definition at line 1857 of file SkRasterPipeline_opts.h.

1857 {
1858 constexpr float one_over_pi2 = 1 / (2 * SK_FloatPI);
1859 x *= one_over_pi2;
1860 x = 0.25f - abs_(x - floor_(x + 0.5f));
1861 return sin5q_(x);
1862}
SI F floor_(F x)

◆ css_hsl_to_srgb_()

SI RGB SK_OPTS_NS::css_hsl_to_srgb_ ( F  h,
F  s,
F  l 
)

Definition at line 2570 of file SkRasterPipeline_opts.h.

2570 {
2571 h = mod_(h, 360);
2572
2573 s *= 0.01f;
2574 l *= 0.01f;
2575
2576 F k[3] = {
2577 mod_(0 + h * (1 / 30.0f), 12),
2578 mod_(8 + h * (1 / 30.0f), 12),
2579 mod_(4 + h * (1 / 30.0f), 12)
2580 };
2581 F a = s * min(l, 1 - l);
2582 return {
2583 l - a * max(-1.0f, min(min(k[0] - 3.0f, 9.0f - k[0]), 1.0f)),
2584 l - a * max(-1.0f, min(min(k[1] - 3.0f, 9.0f - k[1]), 1.0f)),
2585 l - a * max(-1.0f, min(min(k[2] - 3.0f, 9.0f - k[2]), 1.0f))
2586 };
2587}
SI F mod_(F x, float y)

◆ D32_A8_Opaque_Color_lsx()

template<bool isColor>
static void SK_OPTS_NS::D32_A8_Opaque_Color_lsx ( void *SK_RESTRICT  dst,
size_t  dstRB,
const void *SK_RESTRICT  maskPtr,
size_t  maskRB,
SkColor  color,
int  width,
int  height 
)
static

Definition at line 174 of file SkBlitMask_opts.h.

176 {
179 const uint8_t* SK_RESTRICT mask = (const uint8_t*)maskPtr;
180 __m128i vpmc_b = __lsx_vldi(0);
181 __m128i vpmc_g = __lsx_vldi(0);
182 __m128i vpmc_r = __lsx_vldi(0);
183 __m128i vpmc_a = __lsx_vldi(0);
184
185 // Nine patch may set maskRB to 0 to blit the same row repeatedly.
186 ptrdiff_t mask_adjust = (ptrdiff_t)maskRB - width;
187 dstRB -= (width << 2);
188
189 if (width >= 8) {
190 vpmc_b = __lsx_vreplgr2vr_h(SkGetPackedB32(pmc));
191 vpmc_g = __lsx_vreplgr2vr_h(SkGetPackedG32(pmc));
192 vpmc_r = __lsx_vreplgr2vr_h(SkGetPackedR32(pmc));
193 vpmc_a = __lsx_vreplgr2vr_h(SkGetPackedA32(pmc));
194 }
195
196 const __m128i zeros = __lsx_vldi(0);
197 __m128i planar = __lsx_vldi(0);
198 planar = __lsx_vinsgr2vr_d(planar, 0x0d0905010c080400, 0);
199 planar = __lsx_vinsgr2vr_d(planar, 0x0f0b07030e0a0602, 1);
200
201 do{
202 int w = width;
203 while(w >= 8){
204 __m128i lo = __lsx_vld(device, 0); // bgra bgra bgra bgra
205 __m128i hi = __lsx_vld(device, 16); // BGRA BGRA BGRA BGRA
206 lo = __lsx_vshuf_b(zeros, lo, planar); // bbbb gggg rrrr aaaa
207 hi = __lsx_vshuf_b(zeros, hi, planar); // BBBB GGGG RRRR AAAA
208 __m128i bg = __lsx_vilvl_w(hi, lo), // bbbb BBBB gggg GGGG
209 ra = __lsx_vilvh_w(hi, lo); // rrrr RRRR aaaa AAAA
210
211 __m128i b = __lsx_vilvl_b(zeros, bg), // _b_b _b_b _B_B _B_B
212 g = __lsx_vilvh_b(zeros, bg), // _g_g _g_g _G_G _G_G
213 r = __lsx_vilvl_b(zeros, ra), // _r_r _r_r _R_R _R_R
214 a = __lsx_vilvh_b(zeros, ra); // _a_a _a_a _A_A _A_A
215
216 __m128i vmask = __lsx_vld(mask, 0);
217 vmask = __lsx_vilvl_b(zeros, vmask);
218 __m128i vscale, vmask256 = __lsx_vadd_h(vmask, __lsx_vreplgr2vr_h(1));
219
220 if (isColor) {
221 __m128i tmp = SkAlphaMul_lsx(vpmc_a, vmask256);
222 vscale = __lsx_vsub_h(__lsx_vreplgr2vr_h(256), tmp);
223 } else {
224 vscale = __lsx_vsub_h(__lsx_vreplgr2vr_h(256), vmask);
225 }
226
227 b = SkAlphaMul_lsx(vpmc_b, vmask256) + SkAlphaMul_lsx(b, vscale);
228 g = SkAlphaMul_lsx(vpmc_g, vmask256) + SkAlphaMul_lsx(g, vscale);
229 r = SkAlphaMul_lsx(vpmc_r, vmask256) + SkAlphaMul_lsx(r, vscale);
230 a = SkAlphaMul_lsx(vpmc_a, vmask256) + SkAlphaMul_lsx(a, vscale);
231
232 bg = __lsx_vor_v(b, __lsx_vslli_h(g, 8)); // bgbg bgbg BGBG BGBG
233 ra = __lsx_vor_v(r, __lsx_vslli_h(a, 8)); // rara rara RARA RARA
234 lo = __lsx_vilvl_h(ra, bg); // bgra bgra bgra bgra
235 hi = __lsx_vilvh_h(ra, bg); // BGRA BGRA BGRA BGRA
236
237 __lsx_vst(lo, device, 0);
238 __lsx_vst(hi, device, 16);
239
240 mask += 8;
241 device += 8;
242 w -= 8;
243 }
244
245 while (w--) {
246 unsigned aa = *mask++;
247 if (isColor) {
248 *device = SkBlendARGB32(pmc, *device, aa);
249 } else {
251 + SkAlphaMulQ(*device, SkAlpha255To256(255 - aa));
252 }
253 device += 1;
254 }
255
256 device = (uint32_t *)((char*)device + dstRB);
257 mask += mask_adjust;
258
259 } while (--height != 0);
260 }
static SkPMColor SkBlendARGB32(SkPMColor src, SkPMColor dst, U8CPU aa)
Definition: SkColorData.h:274
#define SkGetPackedB32(packed)
Definition: SkColorPriv.h:95
#define SkGetPackedR32(packed)
Definition: SkColorPriv.h:93
#define SkGetPackedG32(packed)
Definition: SkColorPriv.h:94
SK_API SkPMColor SkPreMultiplyColor(SkColor c)
Definition: SkColor.cpp:21

◆ DECLARE_BINARY_FLOAT()

SK_OPTS_NS::DECLARE_BINARY_FLOAT ( add  )

◆ DECLARE_UNARY_INT()

SK_OPTS_NS::DECLARE_UNARY_INT ( cast_to_float_from  )

◆ decode_packed_coordinates_and_weight()

template<typename U32 , typename Out >
static void SK_OPTS_NS::decode_packed_coordinates_and_weight ( U32  packed,
Out *  v0,
Out *  v1,
Out *  w 
)
static

Definition at line 38 of file SkBitmapProcState_opts.h.

38 {
39 *v0 = (packed >> 18); // Integer coordinate x0 or y0.
40 *v1 = (packed & 0x3fff); // Integer coordinate x1 or y1.
41 *w = (packed >> 14) & 0xf; // Lerp weight for v1; weight for v0 is 16-w.
42}

◆ div_fn()

template<typename T >
SI void SK_OPTS_NS::div_fn ( T dst,
T src 
)

Definition at line 4584 of file SkRasterPipeline_opts.h.

4584 {
4585 T divisor = *src;
4586 if constexpr (!std::is_same_v<T, F>) {
4587 // We will crash if we integer-divide against zero. Convert 0 to ~0 to avoid this.
4588 divisor |= (T)cond_to_mask(divisor == 0);
4589 }
4590 *dst /= divisor;
4591}

◆ exclusive_mirror()

SI F SK_OPTS_NS::exclusive_mirror ( F  v,
const SkRasterPipeline_TileCtx ctx 
)

Definition at line 3238 of file SkRasterPipeline_opts.h.

3238 {
3239 auto limit = ctx->scale;
3240 auto invLimit = ctx->invScale;
3241
3242 // This is "repeat" over the range 0..2*limit
3243 auto u = v - floor_(v*invLimit*0.5f)*2*limit;
3244 // s will be 0 when moving forward (e.g. [0, limit)) and 1 when moving backward (e.g.
3245 // [limit, 2*limit)).
3246 auto s = floor_(u*invLimit);
3247 // This is the mirror result.
3248 auto m = u - 2*s*(u - limit);
3249 // Apply a bias to m if moving backwards so that we snap consistently at exact integer coords in
3250 // the logical infinite image. This is tested by mirror_tile GM. Note that all values
3251 // that have a non-zero bias applied are > 0.
3252 auto biasInUlps = trunc_(s);
3253 return sk_bit_cast<F>(sk_bit_cast<U32>(m) + ctx->mirrorBiasDir*biasInUlps);
3254}

◆ exclusive_repeat()

SI F SK_OPTS_NS::exclusive_repeat ( F  v,
const SkRasterPipeline_TileCtx ctx 
)

Definition at line 3235 of file SkRasterPipeline_opts.h.

3235 {
3236 return v - floor_(v*ctx->invScale)*ctx->scale;
3237}

◆ expand() [1/2]

SI U32 SK_OPTS_NS::expand ( U16  v)

Definition at line 1340 of file SkRasterPipeline_opts.h.

1340{ return (U32)v; }

◆ expand() [2/2]

SI U32 SK_OPTS_NS::expand ( U8  v)

Definition at line 1341 of file SkRasterPipeline_opts.h.

1341{ return (U32)v; }

◆ F_()

SI constexpr F SK_OPTS_NS::F_ ( float  x)
constexpr

Definition at line 1299 of file SkRasterPipeline_opts.h.

1299{ return x; }

◆ floor_()

SI F SK_OPTS_NS::floor_ ( F  v)

Definition at line 152 of file SkRasterPipeline_opts.h.

152{ return floorf(v); }

◆ floor_fn()

SI void SK_OPTS_NS::floor_fn ( F dst)

Definition at line 4402 of file SkRasterPipeline_opts.h.

4402 {
4403 *dst = floor_(*dst);
4404}

◆ fract()

SI F SK_OPTS_NS::fract ( F  v)

Definition at line 1356 of file SkRasterPipeline_opts.h.

1356{ return v - floor_(v); }

◆ from_10101010_xr()

SI void SK_OPTS_NS::from_10101010_xr ( U64  _10x6,
F r,
F g,
F b,
F a 
)

Definition at line 1795 of file SkRasterPipeline_opts.h.

1795 {
1796 *r = (cast64((_10x6 >> 6) & 0x3ff) - 384.f) / 510.f;
1797 *g = (cast64((_10x6 >> 22) & 0x3ff) - 384.f) / 510.f;
1798 *b = (cast64((_10x6 >> 38) & 0x3ff) - 384.f) / 510.f;
1799 *a = (cast64((_10x6 >> 54) & 0x3ff) - 384.f) / 510.f;
1800}

◆ from_1010102()

SI void SK_OPTS_NS::from_1010102 ( U32  rgba,
F r,
F g,
F b,
F a 
)

Definition at line 1780 of file SkRasterPipeline_opts.h.

1780 {
1781 *r = cast((rgba ) & 0x3ff) * (1/1023.0f);
1782 *g = cast((rgba >> 10) & 0x3ff) * (1/1023.0f);
1783 *b = cast((rgba >> 20) & 0x3ff) * (1/1023.0f);
1784 *a = cast((rgba >> 30) ) * (1/ 3.0f);
1785}
static const uint32_t rgba[kNumPixels]

◆ from_1010102_xr()

SI void SK_OPTS_NS::from_1010102_xr ( U32  rgba,
F r,
F g,
F b,
F a 
)

Definition at line 1786 of file SkRasterPipeline_opts.h.

1786 {
1787 static constexpr float min = -0.752941f;
1788 static constexpr float max = 1.25098f;
1789 static constexpr float range = max - min;
1790 *r = cast((rgba ) & 0x3ff) * (1/1023.0f) * range + min;
1791 *g = cast((rgba >> 10) & 0x3ff) * (1/1023.0f) * range + min;
1792 *b = cast((rgba >> 20) & 0x3ff) * (1/1023.0f) * range + min;
1793 *a = cast((rgba >> 30) ) * (1/ 3.0f);
1794}

◆ from_10x6()

SI void SK_OPTS_NS::from_10x6 ( U64  _10x6,
F r,
F g,
F b,
F a 
)

Definition at line 1801 of file SkRasterPipeline_opts.h.

1801 {
1802 *r = cast64((_10x6 >> 6) & 0x3ff) * (1/1023.0f);
1803 *g = cast64((_10x6 >> 22) & 0x3ff) * (1/1023.0f);
1804 *b = cast64((_10x6 >> 38) & 0x3ff) * (1/1023.0f);
1805 *a = cast64((_10x6 >> 54) & 0x3ff) * (1/1023.0f);
1806}

◆ from_1616()

SI void SK_OPTS_NS::from_1616 ( U32  _1616,
F r,
F g 
)

Definition at line 1807 of file SkRasterPipeline_opts.h.

1807 {
1808 *r = cast((_1616 ) & 0xffff) * (1/65535.0f);
1809 *g = cast((_1616 >> 16) & 0xffff) * (1/65535.0f);
1810}

◆ from_16161616()

SI void SK_OPTS_NS::from_16161616 ( U64  _16161616,
F r,
F g,
F b,
F a 
)

Definition at line 1811 of file SkRasterPipeline_opts.h.

1811 {
1812 *r = cast64((_16161616 ) & 0xffff) * (1/65535.0f);
1813 *g = cast64((_16161616 >> 16) & 0xffff) * (1/65535.0f);
1814 *b = cast64((_16161616 >> 32) & 0xffff) * (1/65535.0f);
1815 *a = cast64((_16161616 >> 48) & 0xffff) * (1/65535.0f);
1816}

◆ from_4444()

SI void SK_OPTS_NS::from_4444 ( U16  _4444,
F r,
F g,
F b,
F a 
)

Definition at line 1762 of file SkRasterPipeline_opts.h.

1762 {
1763 U32 wide = expand(_4444);
1764 *r = cast(wide & (15<<12)) * (1.0f / (15<<12));
1765 *g = cast(wide & (15<< 8)) * (1.0f / (15<< 8));
1766 *b = cast(wide & (15<< 4)) * (1.0f / (15<< 4));
1767 *a = cast(wide & (15<< 0)) * (1.0f / (15<< 0));
1768}
SI U32 expand(U8 v)

◆ from_565()

SI void SK_OPTS_NS::from_565 ( U16  _565,
F r,
F g,
F b 
)

Definition at line 1756 of file SkRasterPipeline_opts.h.

1756 {
1757 U32 wide = expand(_565);
1758 *r = cast(wide & (31<<11)) * (1.0f / (31<<11));
1759 *g = cast(wide & (63<< 5)) * (1.0f / (63<< 5));
1760 *b = cast(wide & (31<< 0)) * (1.0f / (31<< 0));
1761}

◆ from_88()

SI void SK_OPTS_NS::from_88 ( U16  _88,
F r,
F g 
)

Definition at line 1775 of file SkRasterPipeline_opts.h.

1775 {
1776 U32 wide = expand(_88);
1777 *r = cast((wide ) & 0xff) * (1/255.0f);
1778 *g = cast((wide >> 8) & 0xff) * (1/255.0f);
1779}

◆ from_8888()

SI void SK_OPTS_NS::from_8888 ( U32  _8888,
F r,
F g,
F b,
F a 
)

Definition at line 1769 of file SkRasterPipeline_opts.h.

1769 {
1770 *r = cast((_8888 ) & 0xff) * (1/255.0f);
1771 *g = cast((_8888 >> 8) & 0xff) * (1/255.0f);
1772 *b = cast((_8888 >> 16) & 0xff) * (1/255.0f);
1773 *a = cast((_8888 >> 24) ) * (1/255.0f);
1774}

◆ from_byte()

SI F SK_OPTS_NS::from_byte ( U8  b)

Definition at line 1750 of file SkRasterPipeline_opts.h.

1750 {
1751 return cast(expand(b)) * (1/255.0f);
1752}

◆ from_half()

SI F SK_OPTS_NS::from_half ( U16  h)

Definition at line 1399 of file SkRasterPipeline_opts.h.

1399 {
1400#if defined(JUMPER_IS_NEON) && defined(SK_CPU_ARM64)
1401 return vcvt_f32_f16((float16x4_t)h);
1402
1403#elif defined(JUMPER_IS_SKX)
1404 return _mm512_cvtph_ps((__m256i)h);
1405
1406#elif defined(JUMPER_IS_HSW)
1407 return _mm256_cvtph_ps((__m128i)h);
1408
1409#else
1410 // Remember, a half is 1-5-10 (sign-exponent-mantissa) with 15 exponent bias.
1411 U32 sem = expand(h),
1412 s = sem & 0x8000,
1413 em = sem ^ s;
1414
1415 // Convert to 1-8-23 float with 127 bias, flushing denorm halfs (including zero) to zero.
1416 auto denorm = (I32)em < 0x0400; // I32 comparison is often quicker, and always safe here.
1417 return if_then_else(denorm, F0
1418 , sk_bit_cast<F>( (s<<16) + (em<<13) + ((127-15)<<23) ));
1419#endif
1420}

◆ from_short()

SI F SK_OPTS_NS::from_short ( U16  s)

Definition at line 1753 of file SkRasterPipeline_opts.h.

1753 {
1754 return cast(expand(s)) * (1/65535.0f);
1755}

◆ gather()

template<typename T >
SI T SK_OPTS_NS::gather ( const T p,
U32  ix 
)

Definition at line 172 of file SkRasterPipeline_opts.h.

172{ return p[ix]; }

◆ gradient_lookup()

SI void SK_OPTS_NS::gradient_lookup ( const SkRasterPipeline_GradientCtx c,
U32  idx,
F  t,
F r,
F g,
F b,
F a 
)

Definition at line 3390 of file SkRasterPipeline_opts.h.

3391 {
3392 F fr, br, fg, bg, fb, bb, fa, ba;
3393#if defined(JUMPER_IS_HSW)
3394 if (c->stopCount <=8) {
3395 fr = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[0]), (__m256i)idx);
3396 br = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[0]), (__m256i)idx);
3397 fg = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[1]), (__m256i)idx);
3398 bg = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[1]), (__m256i)idx);
3399 fb = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[2]), (__m256i)idx);
3400 bb = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[2]), (__m256i)idx);
3401 fa = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[3]), (__m256i)idx);
3402 ba = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[3]), (__m256i)idx);
3403 } else
3404#elif defined(JUMPER_IS_LASX)
3405 if (c->stopCount <= 8) {
3406 fr = (__m256)__lasx_xvperm_w(__lasx_xvld(c->fs[0], 0), idx);
3407 br = (__m256)__lasx_xvperm_w(__lasx_xvld(c->bs[0], 0), idx);
3408 fg = (__m256)__lasx_xvperm_w(__lasx_xvld(c->fs[1], 0), idx);
3409 bg = (__m256)__lasx_xvperm_w(__lasx_xvld(c->bs[1], 0), idx);
3410 fb = (__m256)__lasx_xvperm_w(__lasx_xvld(c->fs[2], 0), idx);
3411 bb = (__m256)__lasx_xvperm_w(__lasx_xvld(c->bs[2], 0), idx);
3412 fa = (__m256)__lasx_xvperm_w(__lasx_xvld(c->fs[3], 0), idx);
3413 ba = (__m256)__lasx_xvperm_w(__lasx_xvld(c->bs[3], 0), idx);
3414 } else
3415#elif defined(JUMPER_IS_LSX)
3416 if (c->stopCount <= 4) {
3417 __m128i zero = __lsx_vldi(0);
3418 fr = (__m128)__lsx_vshuf_w(idx, zero, __lsx_vld(c->fs[0], 0));
3419 br = (__m128)__lsx_vshuf_w(idx, zero, __lsx_vld(c->bs[0], 0));
3420 fg = (__m128)__lsx_vshuf_w(idx, zero, __lsx_vld(c->fs[1], 0));
3421 bg = (__m128)__lsx_vshuf_w(idx, zero, __lsx_vld(c->bs[1], 0));
3422 fb = (__m128)__lsx_vshuf_w(idx, zero, __lsx_vld(c->fs[2], 0));
3423 bb = (__m128)__lsx_vshuf_w(idx, zero, __lsx_vld(c->bs[2], 0));
3424 fa = (__m128)__lsx_vshuf_w(idx, zero, __lsx_vld(c->fs[3], 0));
3425 ba = (__m128)__lsx_vshuf_w(idx, zero, __lsx_vld(c->bs[3], 0));
3426 } else
3427#endif
3428 {
3429 fr = gather(c->fs[0], idx);
3430 br = gather(c->bs[0], idx);
3431 fg = gather(c->fs[1], idx);
3432 bg = gather(c->bs[1], idx);
3433 fb = gather(c->fs[2], idx);
3434 bb = gather(c->bs[2], idx);
3435 fa = gather(c->fs[3], idx);
3436 ba = gather(c->bs[3], idx);
3437 }
3438
3439 *r = mad(t, fr, br);
3440 *g = mad(t, fg, bg);
3441 *b = mad(t, fb, bb);
3442 *a = mad(t, fa, ba);
3443}
SI T gather(const T *p, U32 ix)

◆ I32_()

SI constexpr I32 SK_OPTS_NS::I32_ ( int32_t  x)
constexpr

Definition at line 1300 of file SkRasterPipeline_opts.h.

1300{ return x; }

◆ if_then_else() [1/2]

SI F SK_OPTS_NS::if_then_else ( I32  c,
F  t,
F  e 
)

Definition at line 165 of file SkRasterPipeline_opts.h.

165{ return c ? t : e; }

◆ if_then_else() [2/2]

SI I32 SK_OPTS_NS::if_then_else ( I32  c,
I32  t,
I32  e 
)

Definition at line 166 of file SkRasterPipeline_opts.h.

166{ return c ? t : e; }

◆ inv()

SI F SK_OPTS_NS::inv ( F  x)

Definition at line 2171 of file SkRasterPipeline_opts.h.

2171{ return 1.0f - x; }

◆ invsqrt_fn()

SI void SK_OPTS_NS::invsqrt_fn ( F dst)

Definition at line 4410 of file SkRasterPipeline_opts.h.

4410 {
4411 *dst = rsqrt(*dst);
4412}

◆ iround()

SI I32 SK_OPTS_NS::iround ( F  v)

Definition at line 159 of file SkRasterPipeline_opts.h.

159{ return (I32)(v + 0.5f); }

◆ ix_and_ptr()

template<typename T >
SI U32 SK_OPTS_NS::ix_and_ptr ( T **  ptr,
const SkRasterPipeline_GatherCtx ctx,
F  x,
F  y 
)

Definition at line 1987 of file SkRasterPipeline_opts.h.

1987 {
1988 // We use exclusive clamp so that our min value is > 0 because ULP subtraction using U32 would
1989 // produce a NaN if applied to +0.f.
1990 x = clamp_ex(x, ctx->width );
1991 y = clamp_ex(y, ctx->height);
1992 x = sk_bit_cast<F>(sk_bit_cast<U32>(x) - (uint32_t)ctx->roundDownAtInteger);
1993 y = sk_bit_cast<F>(sk_bit_cast<U32>(y) - (uint32_t)ctx->roundDownAtInteger);
1994 *ptr = (const T*)ctx->pixels;
1995 return trunc_(y)*ctx->stride + trunc_(x);
1996}
SI F clamp_ex(F v, float limit)

◆ just_return()

static void ABI SK_OPTS_NS::just_return ( Params ,
SkRasterPipelineStage ,
F  ,
F  ,
F  ,
F   
)
static

Definition at line 1626 of file SkRasterPipeline_opts.h.

1626{}

◆ lerp()

SI F SK_OPTS_NS::lerp ( F  from,
F  to,
F  t 
)

Definition at line 2651 of file SkRasterPipeline_opts.h.

2651 {
2652 return mad(to-from, t, from);
2653}

◆ load()

template<typename V , typename T >
SI V SK_OPTS_NS::load ( const T src)

Definition at line 1741 of file SkRasterPipeline_opts.h.

1741 {
1742 return sk_unaligned_load<V>(src);
1743}

◆ load2()

SI void SK_OPTS_NS::load2 ( const uint16_t *  ptr,
U16 r,
U16 g 
)

Definition at line 178 of file SkRasterPipeline_opts.h.

178 {
179 *r = ptr[0];
180 *g = ptr[1];
181 }

◆ load4() [1/2]

SI void SK_OPTS_NS::load4 ( const float *  ptr,
F r,
F g,
F b,
F a 
)

Definition at line 199 of file SkRasterPipeline_opts.h.

199 {
200 *r = ptr[0];
201 *g = ptr[1];
202 *b = ptr[2];
203 *a = ptr[3];
204 }

◆ load4() [2/2]

SI void SK_OPTS_NS::load4 ( const uint16_t *  ptr,
U16 r,
U16 g,
U16 b,
U16 a 
)

Definition at line 186 of file SkRasterPipeline_opts.h.

186 {
187 *r = ptr[0];
188 *g = ptr[1];
189 *b = ptr[2];
190 *a = ptr[3];
191 }

◆ lum()

SI F SK_OPTS_NS::lum ( F  r,
F  g,
F  b 
)

Definition at line 2252 of file SkRasterPipeline_opts.h.

2252{ return mad(r, 0.30f, mad(g, 0.59f, b*0.11f)); }

◆ mad()

SI F SK_OPTS_NS::mad ( F  f,
F  m,
F  a 
)

Definition at line 148 of file SkRasterPipeline_opts.h.

148{ return a+f*m; }

◆ matrix_multiply()

template<int N>
SI void SK_OPTS_NS::matrix_multiply ( SkRasterPipeline_MatrixMultiplyCtx packed,
std::byte *  base 
)

Definition at line 4785 of file SkRasterPipeline_opts.h.

4785 {
4786 auto ctx = SkRPCtxUtils::Unpack(packed);
4787
4788 int outColumns = ctx.rightColumns,
4789 outRows = ctx.leftRows;
4790
4791 SkASSERT(outColumns >= 1);
4792 SkASSERT(outRows >= 1);
4793 SkASSERT(outColumns <= 4);
4794 SkASSERT(outRows <= 4);
4795
4796 SkASSERT(ctx.leftColumns == ctx.rightRows);
4797 SkASSERT(N == ctx.leftColumns); // N should match the result width
4798
4799#if !defined(JUMPER_IS_SCALAR)
4800 // This prevents Clang from generating early-out checks for zero-sized matrices.
4801 SK_ASSUME(outColumns >= 1);
4802 SK_ASSUME(outRows >= 1);
4803 SK_ASSUME(outColumns <= 4);
4804 SK_ASSUME(outRows <= 4);
4805#endif
4806
4807 // Get pointers to the adjacent left- and right-matrices.
4808 F* resultMtx = (F*)(base + ctx.dst);
4809 F* leftMtx = &resultMtx[ctx.rightColumns * ctx.leftRows];
4810 F* rightMtx = &leftMtx[N * ctx.leftRows];
4811
4812 // Emit each matrix element.
4813 for (int c = 0; c < outColumns; ++c) {
4814 for (int r = 0; r < outRows; ++r) {
4815 // Dot a vector from leftMtx[*][r] with rightMtx[c][*].
4816 F* leftRow = &leftMtx [r];
4817 F* rightColumn = &rightMtx[c * N];
4818
4819 F element = *leftRow * *rightColumn;
4820 for (int idx = 1; idx < N; ++idx) {
4821 leftRow += outRows;
4822 rightColumn += 1;
4823 element = mad(*leftRow, *rightColumn, element);
4824 }
4825
4826 *resultMtx++ = element;
4827 }
4828 }
4829}

◆ max() [1/3]

SI F SK_OPTS_NS::max ( F  a,
F  b 
)

Definition at line 144 of file SkRasterPipeline_opts.h.

144{ return fmaxf(a,b); }

◆ max() [2/3]

SI I32 SK_OPTS_NS::max ( I32  a,
I32  b 
)

Definition at line 145 of file SkRasterPipeline_opts.h.

145{ return a > b ? a : b; }

◆ max() [3/3]

SI U32 SK_OPTS_NS::max ( U32  a,
U32  b 
)

Definition at line 146 of file SkRasterPipeline_opts.h.

146{ return a > b ? a : b; }

◆ max_fn()

template<typename T >
SI void SK_OPTS_NS::max_fn ( T dst,
T src 
)

Definition at line 4606 of file SkRasterPipeline_opts.h.

4606 {
4607 *dst = max(*dst, *src);
4608}

◆ memset16()

void SK_OPTS_NS::memset16 ( uint16_t  buffer[],
uint16_t  value,
int  count 
)
inline

Definition at line 38 of file SkMemset_opts.h.

38 {
40 }
uint8_t value
static void memsetT(T buffer[], T value, int count)
Definition: SkMemset_opts.h:17
DEF_SWITCHES_START aot vmservice shared library Name of the *so containing AOT compiled Dart assets for launching the service isolate vm snapshot The VM snapshot data that will be memory mapped as read only SnapshotAssetPath must be present isolate snapshot The isolate snapshot data that will be memory mapped as read only SnapshotAssetPath must be present cache dir Path to the cache directory This is different from the persistent_cache_path in embedder which is used for Skia shader cache icu native lib Path to the library file that exports the ICU data vm service The hostname IP address on which the Dart VM Service should be served If not defaults to or::depending on whether ipv6 is specified vm service A custom Dart VM Service port The default is to pick a randomly available open port disable vm Disable the Dart VM Service The Dart VM Service is never available in release mode disable vm service Disable mDNS Dart VM Service publication Bind to the IPv6 localhost address for the Dart VM Service Ignored if vm service host is set endless trace buffer
Definition: switches.h:126

◆ memset32()

void SK_OPTS_NS::memset32 ( uint32_t  buffer[],
uint32_t  value,
int  count 
)
inline

Definition at line 41 of file SkMemset_opts.h.

41 {
43 }

◆ memset64()

void SK_OPTS_NS::memset64 ( uint64_t  buffer[],
uint64_t  value,
int  count 
)
inline

Definition at line 44 of file SkMemset_opts.h.

44 {
46 }

◆ memsetT()

template<typename T >
static void SK_OPTS_NS::memsetT ( T  buffer[],
T  value,
int  count 
)
static

Definition at line 17 of file SkMemset_opts.h.

17 {
18 #if defined(SK_CPU_SSE_LEVEL) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX
19 static constexpr int VecSize = 32 / sizeof(T);
20 #else
21 static constexpr int VecSize = 16 / sizeof(T);
22 #endif
23 static_assert(VecSize > 0, "T is too big for memsetT");
 24 // Create a vectorized version of value
25 skvx::Vec<VecSize,T> wideValue(value);
26 while (count >= VecSize) {
27 // Copy the value into the destination buffer (VecSize elements at a time)
28 wideValue.store(buffer);
29 buffer += VecSize;
30 count -= VecSize;
31 }
32 // If count was not an even multiple of VecSize, take care of the last few.
33 while (count-- > 0) {
34 *buffer++ = value;
35 }
36 }

◆ min() [1/3]

SI F SK_OPTS_NS::min ( F  a,
F  b 
)

Definition at line 141 of file SkRasterPipeline_opts.h.

141{ return fminf(a,b); }

◆ min() [2/3]

SI I32 SK_OPTS_NS::min ( I32  a,
I32  b 
)

Definition at line 142 of file SkRasterPipeline_opts.h.

142{ return a < b ? a : b; }

◆ min() [3/3]

SI U32 SK_OPTS_NS::min ( U32  a,
U32  b 
)

Definition at line 143 of file SkRasterPipeline_opts.h.

143{ return a < b ? a : b; }

◆ min_fn()

template<typename T >
SI void SK_OPTS_NS::min_fn ( T dst,
T src 
)

Definition at line 4611 of file SkRasterPipeline_opts.h.

4611 {
4612 *dst = min(*dst, *src);
4613}

◆ mix_fn() [1/2]

SI void SK_OPTS_NS::mix_fn ( F a,
F x,
F y 
)

Definition at line 4891 of file SkRasterPipeline_opts.h.

4891 {
4892 // We reorder the arguments here to match lerp's GLSL-style order (interpolation point last).
4893 *a = lerp(*x, *y, *a);
4894}
SkPoint lerp(const SkPoint &a, const SkPoint &b, float t)

◆ mix_fn() [2/2]

SI void SK_OPTS_NS::mix_fn ( I32 a,
I32 x,
I32 y 
)

Definition at line 4896 of file SkRasterPipeline_opts.h.

4896 {
4897 // We reorder the arguments here to match if_then_else's expected order (y before x).
4898 *a = if_then_else(*a, *y, *x);
4899}

◆ mod_()

SI F SK_OPTS_NS::mod_ ( F  x,
float  y 
)

Definition at line 2564 of file SkRasterPipeline_opts.h.

2564 {
2565 return nmad(y, floor_(x * (1 / y)), x);
2566}

◆ mod_fn()

SI void SK_OPTS_NS::mod_fn ( F dst,
F src 
)

Definition at line 4651 of file SkRasterPipeline_opts.h.

4651 {
4652 *dst = nmad(*src, floor_(*dst / *src), *dst);
4653}

◆ mul_fn()

template<typename T >
SI void SK_OPTS_NS::mul_fn ( T dst,
T src 
)

Definition at line 4579 of file SkRasterPipeline_opts.h.

4579 {
4580 *dst *= *src;
4581}

◆ nmad()

SI F SK_OPTS_NS::nmad ( F  f,
F  m,
F  a 
)

Definition at line 149 of file SkRasterPipeline_opts.h.

149{ return a-f*m; }

◆ pack() [1/2]

SI U8 SK_OPTS_NS::pack ( U16  v)

Definition at line 163 of file SkRasterPipeline_opts.h.

163{ return (U8)v; }

◆ pack() [2/2]

SI U16 SK_OPTS_NS::pack ( U32  v)

Definition at line 162 of file SkRasterPipeline_opts.h.

162{ return (U16)v; }

◆ patch_memory_contexts()

static void SK_OPTS_NS::patch_memory_contexts ( SkSpan< SkRasterPipeline_MemoryCtxPatch memoryCtxPatches,
size_t  dx,
size_t  dy,
size_t  tail 
)
static

Definition at line 1445 of file SkRasterPipeline_opts.h.

1446 {
1447 for (SkRasterPipeline_MemoryCtxPatch& patch : memoryCtxPatches) {
1448 SkRasterPipeline_MemoryCtx* ctx = patch.info.context;
1449
1450 const ptrdiff_t offset = patch.info.bytesPerPixel * (dy * ctx->stride + dx);
1451 if (patch.info.load) {
1452 void* ctxData = SkTAddOffset<void>(ctx->pixels, offset);
1453 memcpy(patch.scratch, ctxData, patch.info.bytesPerPixel * tail);
1454 }
1455
1456 SkASSERT(patch.backup == nullptr);
1457 void* scratchFakeBase = SkTAddOffset<void>(patch.scratch, -offset);
1458 patch.backup = ctx->pixels;
1459 ctx->pixels = scratchFakeBase;
1460 }
1461}
skia_private::AutoTArray< sk_sp< SkImageFilter > > filters TypedMatrix matrix TypedMatrix matrix SkScalar dx
Definition: SkRecords.h:208
SeparatedVector2 offset

◆ pow_fn()

SI void SK_OPTS_NS::pow_fn ( F dst,
F src 
)

Definition at line 4647 of file SkRasterPipeline_opts.h.

4647 {
4648 *dst = approx_powf(*dst, *src);
4649}
SI F approx_powf(F x, F y)

◆ ptr_at_xy()

template<typename T >
SI T * SK_OPTS_NS::ptr_at_xy ( const SkRasterPipeline_MemoryCtx ctx,
size_t  dx,
size_t  dy 
)

Definition at line 1820 of file SkRasterPipeline_opts.h.

1820 {
1821 return (T*)ctx->pixels + dy*ctx->stride + dx;
1822}

◆ raster_pipeline_highp_stride()

constexpr size_t SK_OPTS_NS::raster_pipeline_highp_stride ( )
constexpr

Definition at line 6608 of file SkRasterPipeline_opts.h.

6608{ return N; }

◆ raster_pipeline_lowp_stride()

constexpr size_t SK_OPTS_NS::raster_pipeline_lowp_stride ( )
constexpr

Allow outside code to access the Raster Pipeline pixel stride.

Definition at line 6607 of file SkRasterPipeline_opts.h.

6607{ return lowp::lowp_N; }
static constexpr size_t lowp_N

◆ rcp_approx()

SI F SK_OPTS_NS::rcp_approx ( F  v)

Definition at line 154 of file SkRasterPipeline_opts.h.

154{ return 1.0f / v; } // use rcp_fast instead

◆ rcp_fast()

SI F SK_OPTS_NS::rcp_fast ( F  v)

Definition at line 1483 of file SkRasterPipeline_opts.h.

1483{ return rcp_precise(v); }

◆ rcp_precise()

SI F SK_OPTS_NS::rcp_precise ( F  v)

Definition at line 157 of file SkRasterPipeline_opts.h.

157{ return 1.0f / v; }

◆ rect_memset16()

void SK_OPTS_NS::rect_memset16 ( uint16_t  buffer[],
uint16_t  value,
int  count,
size_t  rowBytes,
int  height 
)
inline

Definition at line 56 of file SkMemset_opts.h.

57 {
58 rect_memsetT(buffer, value, count, rowBytes, height);
59 }
static void rect_memsetT(T buffer[], T value, int count, size_t rowBytes, int height)
Definition: SkMemset_opts.h:49

◆ rect_memset32()

void SK_OPTS_NS::rect_memset32 ( uint32_t  buffer[],
uint32_t  value,
int  count,
size_t  rowBytes,
int  height 
)
inline

Definition at line 60 of file SkMemset_opts.h.

61 {
62 rect_memsetT(buffer, value, count, rowBytes, height);
63 }

◆ rect_memset64()

void SK_OPTS_NS::rect_memset64 ( uint64_t  buffer[],
uint64_t  value,
int  count,
size_t  rowBytes,
int  height 
)
inline

Definition at line 64 of file SkMemset_opts.h.

65 {
66 rect_memsetT(buffer, value, count, rowBytes, height);
67 }

◆ rect_memsetT()

template<typename T >
static void SK_OPTS_NS::rect_memsetT ( T  buffer[],
T  value,
int  count,
size_t  rowBytes,
int  height 
)
static

Definition at line 49 of file SkMemset_opts.h.

49 {
50 while (height --> 0) {
52 buffer = (T*)((char*)buffer + rowBytes);
53 }
54 }

◆ restore_memory_contexts()

static void SK_OPTS_NS::restore_memory_contexts ( SkSpan< SkRasterPipeline_MemoryCtxPatch memoryCtxPatches,
size_t  dx,
size_t  dy,
size_t  tail 
)
static

Definition at line 1463 of file SkRasterPipeline_opts.h.

1464 {
1465 for (SkRasterPipeline_MemoryCtxPatch& patch : memoryCtxPatches) {
1466 SkRasterPipeline_MemoryCtx* ctx = patch.info.context;
1467
1468 SkASSERT(patch.backup != nullptr);
1469 ctx->pixels = patch.backup;
1470 patch.backup = nullptr;
1471
1472 const ptrdiff_t offset = patch.info.bytesPerPixel * (dy * ctx->stride + dx);
1473 if (patch.info.store) {
1474 void* ctxData = SkTAddOffset<void>(ctx->pixels, offset);
1475 memcpy(ctxData, patch.scratch, patch.info.bytesPerPixel * tail);
1476 }
1477 }
1478}

◆ round() [1/2]

SI U32 SK_OPTS_NS::round ( F  v)

Definition at line 160 of file SkRasterPipeline_opts.h.

160{ return (U32)(v + 0.5f); }

◆ round() [2/2]

SI U32 SK_OPTS_NS::round ( F  v,
F  scale 
)

Definition at line 161 of file SkRasterPipeline_opts.h.

161{ return (U32)(v*scale + 0.5f); }
const Scalar scale

◆ rsqrt()

SI F SK_OPTS_NS::rsqrt ( F  v)

Definition at line 1484 of file SkRasterPipeline_opts.h.

1484{ return rcp_precise(sqrt_(v)); }

◆ rsqrt_approx()

SI F SK_OPTS_NS::rsqrt_approx ( F  v)

Definition at line 155 of file SkRasterPipeline_opts.h.

155{ return 1.0f / sqrtf(v); }

◆ S32_alpha_D32_filter_DX()

void SK_OPTS_NS::S32_alpha_D32_filter_DX ( const SkBitmapProcState s,
const uint32_t *  xy,
int  count,
uint32_t *  colors 
)
inline

Definition at line 47 of file SkBitmapProcState_opts.h.

48 {
49 SkASSERT(count > 0 && colors != nullptr);
50 SkASSERT(s.fBilerp);
51 SkASSERT(kN32_SkColorType == s.fPixmap.colorType());
52 SkASSERT(s.fAlphaScale <= 256);
53
54 // interpolate_in_x() is the crux of the SSSE3 implementation,
55 // interpolating in X for up to two output pixels (A and B) using _mm_maddubs_epi16().
56 auto interpolate_in_x = [](uint32_t A0, uint32_t A1,
57 uint32_t B0, uint32_t B1,
58 __m128i interlaced_x_weights) {
59 // _mm_maddubs_epi16() is a little idiosyncratic, but great as the core of a lerp.
60 //
61 // It takes two arguments interlaced byte-wise:
62 // - first arg: [ l,r, ... 7 more pairs of unsigned 8-bit values ...]
63 // - second arg: [ w,W, ... 7 more pairs of signed 8-bit values ...]
64 // and returns 8 signed 16-bit values: [ l*w + r*W, ... 7 more ... ].
65 //
66 // That's why we go to all this trouble to make interlaced_x_weights,
67 // and here we're about to interlace A0 with A1 and B0 with B1 to match.
68 //
69 // Our interlaced_x_weights are all in [0,16], and so we need not worry about
70 // the signedness of that input nor about the signedness of the output.
71
72 __m128i interlaced_A = _mm_unpacklo_epi8(_mm_cvtsi32_si128(A0), _mm_cvtsi32_si128(A1)),
73 interlaced_B = _mm_unpacklo_epi8(_mm_cvtsi32_si128(B0), _mm_cvtsi32_si128(B1));
74
75 return _mm_maddubs_epi16(_mm_unpacklo_epi64(interlaced_A, interlaced_B),
76 interlaced_x_weights);
77 };
78
79 // Interpolate {A0..A3} --> output pixel A, and {B0..B3} --> output pixel B.
80 // Returns two pixels, with each color channel in a 16-bit lane of the __m128i.
81 auto interpolate_in_x_and_y = [&](uint32_t A0, uint32_t A1,
82 uint32_t A2, uint32_t A3,
83 uint32_t B0, uint32_t B1,
84 uint32_t B2, uint32_t B3,
85 __m128i interlaced_x_weights,
86 int wy) {
87 // Interpolate each row in X, leaving 16-bit lanes scaled by interlaced_x_weights.
88 __m128i top = interpolate_in_x(A0,A1, B0,B1, interlaced_x_weights),
89 bot = interpolate_in_x(A2,A3, B2,B3, interlaced_x_weights);
90
91 // Interpolate in Y. As in the SSE2 code, we calculate top*(16-wy) + bot*wy
92 // as 16*top + (bot-top)*wy to save a multiply.
93 __m128i px = _mm_add_epi16(_mm_slli_epi16(top, 4),
94 _mm_mullo_epi16(_mm_sub_epi16(bot, top),
95 _mm_set1_epi16(wy)));
96
97 // Scale down by total max weight 16x16 = 256.
98 px = _mm_srli_epi16(px, 8);
99
100 // Scale by alpha if needed.
101 if (s.fAlphaScale < 256) {
102 px = _mm_srli_epi16(_mm_mullo_epi16(px, _mm_set1_epi16(s.fAlphaScale)), 8);
103 }
104 return px;
105 };
106
107 // We're in _DX mode here, so we're only varying in X.
108 // That means the first entry of xy is our constant pair of Y coordinates and weight in Y.
109 // All the other entries in xy will be pairs of X coordinates and the X weight.
110 int y0, y1, wy;
111 decode_packed_coordinates_and_weight(*xy++, &y0, &y1, &wy);
112
113 auto row0 = (const uint32_t*)((const uint8_t*)s.fPixmap.addr() + y0 * s.fPixmap.rowBytes()),
114 row1 = (const uint32_t*)((const uint8_t*)s.fPixmap.addr() + y1 * s.fPixmap.rowBytes());
115
116 while (count >= 4) {
117 // We can really get going, loading 4 X-pairs at a time to produce 4 output pixels.
118 int x0[4],
119 x1[4];
120 __m128i wx;
121
122 // decode_packed_coordinates_and_weight(), 4x.
123 __m128i packed = _mm_loadu_si128((const __m128i*)xy);
124 _mm_storeu_si128((__m128i*)x0, _mm_srli_epi32(packed, 18));
125 _mm_storeu_si128((__m128i*)x1, _mm_and_si128 (packed, _mm_set1_epi32(0x3fff)));
126 wx = _mm_and_si128(_mm_srli_epi32(packed, 14), _mm_set1_epi32(0xf)); // [0,15]
127
128 // Splat each x weight 4x (for each color channel) as wr for pixels on the right at x1,
129 // and sixteen minus that as wl for pixels on the left at x0.
130 __m128i wr = _mm_shuffle_epi8(wx, _mm_setr_epi8(0,0,0,0,4,4,4,4,8,8,8,8,12,12,12,12)),
131 wl = _mm_sub_epi8(_mm_set1_epi8(16), wr);
132
133 // We need to interlace wl and wr for _mm_maddubs_epi16().
134 __m128i interlaced_x_weights_AB = _mm_unpacklo_epi8(wl,wr),
135 interlaced_x_weights_CD = _mm_unpackhi_epi8(wl,wr);
136
137 enum { A,B,C,D };
138
139 // interpolate_in_x_and_y() can produce two output pixels (A and B) at a time
140 // from eight input pixels {A0..A3} and {B0..B3}, arranged in a 2x2 grid for each.
141 __m128i AB = interpolate_in_x_and_y(row0[x0[A]], row0[x1[A]],
142 row1[x0[A]], row1[x1[A]],
143 row0[x0[B]], row0[x1[B]],
144 row1[x0[B]], row1[x1[B]],
145 interlaced_x_weights_AB, wy);
146
147 // Once more with the other half of the x-weights for two more pixels C,D.
148 __m128i CD = interpolate_in_x_and_y(row0[x0[C]], row0[x1[C]],
149 row1[x0[C]], row1[x1[C]],
150 row0[x0[D]], row0[x1[D]],
151 row1[x0[D]], row1[x1[D]],
152 interlaced_x_weights_CD, wy);
153
154 // Scale by alpha, pack back together to 8-bit lanes, and write out four pixels!
155 _mm_storeu_si128((__m128i*)colors, _mm_packus_epi16(AB, CD));
156 xy += 4;
157 colors += 4;
158 count -= 4;
159 }
160
161 while (count --> 0) {
162 // This is exactly the same flow as the count >= 4 loop above, but writing one pixel.
163 int x0, x1, wx;
164 decode_packed_coordinates_and_weight(*xy++, &x0, &x1, &wx);
165
166 // As above, splat out wx four times as wr, and sixteen minus that as wl.
167 __m128i wr = _mm_set1_epi8(wx), // This splats it out 16 times, but that's fine.
168 wl = _mm_sub_epi8(_mm_set1_epi8(16), wr);
169
170 __m128i interlaced_x_weights = _mm_unpacklo_epi8(wl, wr);
171
172 __m128i A = interpolate_in_x_and_y(row0[x0], row0[x1],
173 row1[x0], row1[x1],
174 0, 0,
175 0, 0,
176 interlaced_x_weights, wy);
177
178 *colors++ = _mm_cvtsi128_si32(_mm_packus_epi16(A, _mm_setzero_si128()));
179 }
180 }
static void B2(DFData *curr, int width)
static void B1(DFData *curr, int width)
#define C(TEST_CATEGORY)
Definition: colrv1.cpp:248
#define B
PODArray< SkColor > colors
Definition: SkRecords.h:276
void decode_packed_coordinates_and_weight(U32 packed, Out *v0, Out *v1, Out *w)

◆ sat()

SI F SK_OPTS_NS::sat ( F  r,
F  g,
F  b 
)

Definition at line 2251 of file SkRasterPipeline_opts.h.

2251{ return max(r, max(g,b)) - min(r, min(g,b)); }

◆ save_xy()

SI void SK_OPTS_NS::save_xy ( F r,
F g,
SkRasterPipeline_SamplerCtx c 
)

Definition at line 3564 of file SkRasterPipeline_opts.h.

3564 {
3565 // Whether bilinear or bicubic, all sample points are at the same fractional offset (fx,fy).
3566 // They're either the 4 corners of a logical 1x1 pixel or the 16 corners of a 3x3 grid
3567 // surrounding (x,y) at (0.5,0.5) off-center.
3568 F fx = fract(*r + 0.5f),
3569 fy = fract(*g + 0.5f);
3570
3571 // Samplers will need to load x and fx, or y and fy.
3572 sk_unaligned_store(c->x, *r);
3573 sk_unaligned_store(c->y, *g);
3574 sk_unaligned_store(c->fx, fx);
3575 sk_unaligned_store(c->fy, fy);
3576}

◆ scatter_masked()

SI void SK_OPTS_NS::scatter_masked ( I32  src,
int dst,
U32  ix,
I32  mask 
)

Definition at line 174 of file SkRasterPipeline_opts.h.

174 {
175 dst[ix] = mask ? src : dst[ix];
176 }

◆ select_lane() [1/2]

SI int32_t SK_OPTS_NS::select_lane ( int32_t  data,
int   
)

Definition at line 2024 of file SkRasterPipeline_opts.h.

2024{ return data; }
std::shared_ptr< const fml::Mapping > data
Definition: texture_gles.cc:63

◆ select_lane() [2/2]

SI uint32_t SK_OPTS_NS::select_lane ( uint32_t  data,
int   
)

Definition at line 2023 of file SkRasterPipeline_opts.h.

2023{ return data; }

◆ set_lum()

SI void SK_OPTS_NS::set_lum ( F r,
F g,
F b,
F  l 
)

Definition at line 2265 of file SkRasterPipeline_opts.h.

2265 {
2266 F diff = l - lum(*r, *g, *b);
2267 *r += diff;
2268 *g += diff;
2269 *b += diff;
2270}

◆ set_sat()

SI void SK_OPTS_NS::set_sat ( F r,
F g,
F b,
F  s 
)

Definition at line 2254 of file SkRasterPipeline_opts.h.

2254 {
2255 F mn = min(*r, min(*g,*b)),
2256 mx = max(*r, max(*g,*b)),
2257 sat = mx - mn;
2258
2259 // Map min channel to 0, max channel to s, and scale the middle proportionally.
2260 s = if_then_else(sat == 0.0f, 0.0f, s * rcp_fast(sat));
2261 *r = (*r - mn) * s;
2262 *g = (*g - mn) * s;
2263 *b = (*b - mn) * s;
2264}
static float sat(float r, float g, float b)
Definition: hsl.cpp:51

◆ shuffle_fn()

template<int LoopCount, typename OffsetType >
SI void SK_OPTS_NS::shuffle_fn ( std::byte *  ptr,
OffsetType *  offsets,
int  numSlots 
)

Definition at line 4194 of file SkRasterPipeline_opts.h.

4194 {
4195 F scratch[16];
4196 SK_UNROLL for (int count = 0; count < LoopCount; ++count) {
4197 scratch[count] = *(F*)(ptr + offsets[count]);
4198 }
4199 // Surprisingly, this switch generates significantly better code than a memcpy (on x86-64) when
4200 // the number of slots is unknown at compile time, and generates roughly identical code when the
4201 // number of slots is hardcoded. Using a switch allows `scratch` to live in ymm0-ymm15 instead
4202 // of being written out to the stack and then read back in. Also, the intrinsic memcpy assumes
4203 // that `numSlots` could be arbitrarily large, and so it emits more code than we need.
4204 F* dst = (F*)ptr;
4205 switch (numSlots) {
4206 case 16: dst[15] = scratch[15]; [[fallthrough]];
4207 case 15: dst[14] = scratch[14]; [[fallthrough]];
4208 case 14: dst[13] = scratch[13]; [[fallthrough]];
4209 case 13: dst[12] = scratch[12]; [[fallthrough]];
4210 case 12: dst[11] = scratch[11]; [[fallthrough]];
4211 case 11: dst[10] = scratch[10]; [[fallthrough]];
4212 case 10: dst[ 9] = scratch[ 9]; [[fallthrough]];
4213 case 9: dst[ 8] = scratch[ 8]; [[fallthrough]];
4214 case 8: dst[ 7] = scratch[ 7]; [[fallthrough]];
4215 case 7: dst[ 6] = scratch[ 6]; [[fallthrough]];
4216 case 6: dst[ 5] = scratch[ 5]; [[fallthrough]];
4217 case 5: dst[ 4] = scratch[ 4]; [[fallthrough]];
4218 case 4: dst[ 3] = scratch[ 3]; [[fallthrough]];
4219 case 3: dst[ 2] = scratch[ 2]; [[fallthrough]];
4220 case 2: dst[ 1] = scratch[ 1]; [[fallthrough]];
4221 case 1: dst[ 0] = scratch[ 0];
4222 }
4223}
list offsets
Definition: mskp_parser.py:37

◆ sin5q_()

SI F SK_OPTS_NS::sin5q_ ( F  x)

Definition at line 1839 of file SkRasterPipeline_opts.h.

1839 {
1840 // A * x + B * x^3 + C * x^5
1841 // Exact at x = 0, 1/12, 1/6, 1/4, and their negatives,
1842 // which correspond to x * 2 * pi = 0, pi/6, pi/3, pi/2
1843 constexpr float A = 6.28230858f;
1844 constexpr float B = -41.1693687f;
1845 constexpr float C = 74.4388885f;
1846 F x2 = x * x;
1847 return x * mad(mad(x2, C, B), x2, A);
1848}

◆ sin_()

SI F SK_OPTS_NS::sin_ ( F  x)

Definition at line 1850 of file SkRasterPipeline_opts.h.

1850 {
1851 constexpr float one_over_pi2 = 1 / (2 * SK_FloatPI);
1852 x = mad(x, -one_over_pi2, 0.25f);
1853 x = 0.25f - abs_(x - floor_(x + 0.5f));
1854 return sin5q_(x);
1855}

◆ SkAlphaMul_lsx()

static __m128i SK_OPTS_NS::SkAlphaMul_lsx ( __m128i  x,
__m128i  y 
)
static

Definition at line 167 of file SkBlitMask_opts.h.

167 {
168 __m128i tmp = __lsx_vmul_h(x, y);
169 __m128i mask = __lsx_vreplgr2vr_h(0xff00);
170 return __lsx_vsrlri_h(__lsx_vand_v(tmp, mask), 8);
171 }

◆ small_swizzle_fn()

template<int N>
SI void SK_OPTS_NS::small_swizzle_fn ( SkRasterPipeline_SwizzleCtx packed,
std::byte *  base 
)

Definition at line 4226 of file SkRasterPipeline_opts.h.

4226 {
4227 auto ctx = SkRPCtxUtils::Unpack(packed);
4228 shuffle_fn<N>(base + ctx.dst, ctx.offsets, N);
4229}

◆ smoothstep_fn()

SI void SK_OPTS_NS::smoothstep_fn ( F edge0,
F edge1,
F x 
)

Definition at line 4901 of file SkRasterPipeline_opts.h.

4901 {
4902 F t = clamp_01_((*x - *edge0) / (*edge1 - *edge0));
4903 *edge0 = t * t * (3.0 - 2.0 * t);
4904}

◆ sqrt_()

SI F SK_OPTS_NS::sqrt_ ( F  v)

Definition at line 156 of file SkRasterPipeline_opts.h.

156{ return sqrtf(v); }

◆ stack_checkpoint()

static void ABI SK_OPTS_NS::stack_checkpoint ( Params params,
SkRasterPipelineStage program,
F  r,
F  g,
F  b,
F  a 
)
static

Definition at line 1658 of file SkRasterPipeline_opts.h.

1659 {
1660 SkRasterPipeline_RewindCtx* ctx = Ctx{program};
1661 while (program) {
1662 auto next = (Stage)(++program)->fn;
1663
1664 ctx->stage = nullptr;
1665 next(params, program, r, g, b, a);
1666 program = ctx->stage;
1667
1668 if (program) {
1669 r = sk_unaligned_load<F>(ctx->r );
1670 g = sk_unaligned_load<F>(ctx->g );
1671 b = sk_unaligned_load<F>(ctx->b );
1672 a = sk_unaligned_load<F>(ctx->a );
1673 params->dr = sk_unaligned_load<F>(ctx->dr);
1674 params->dg = sk_unaligned_load<F>(ctx->dg);
1675 params->db = sk_unaligned_load<F>(ctx->db);
1676 params->da = sk_unaligned_load<F>(ctx->da);
1677 params->base = ctx->base;
1678 }
1679 }
1680 }
static float next(float f)
const EmbeddedViewParams * params
void(ABI *)(Params *, SkRasterPipelineStage *program, F r, F g, F b, F a) Stage
float dg[SkRasterPipeline_kMaxStride_highp]
float g[SkRasterPipeline_kMaxStride_highp]
float dr[SkRasterPipeline_kMaxStride_highp]
float db[SkRasterPipeline_kMaxStride_highp]
float a[SkRasterPipeline_kMaxStride_highp]
float r[SkRasterPipeline_kMaxStride_highp]
float da[SkRasterPipeline_kMaxStride_highp]
float b[SkRasterPipeline_kMaxStride_highp]

◆ stack_rewind()

static void ABI SK_OPTS_NS::stack_rewind ( Params params,
SkRasterPipelineStage program,
F  r,
F  g,
F  b,
F  a 
)
static

Definition at line 1681 of file SkRasterPipeline_opts.h.

1682 {
1683 SkRasterPipeline_RewindCtx* ctx = Ctx{program};
1684 sk_unaligned_store(ctx->r , r );
1685 sk_unaligned_store(ctx->g , g );
1686 sk_unaligned_store(ctx->b , b );
1687 sk_unaligned_store(ctx->a , a );
1688 sk_unaligned_store(ctx->dr, params->dr);
1689 sk_unaligned_store(ctx->dg, params->dg);
1690 sk_unaligned_store(ctx->db, params->db);
1691 sk_unaligned_store(ctx->da, params->da);
1692 ctx->base = params->base;
1693 ctx->stage = program;
1694 }

◆ STAGE() [1/188]

SK_OPTS_NS::STAGE ( accumulate  ,
const SkRasterPipeline_SamplerCtx c 
)

Definition at line 3578 of file SkRasterPipeline_opts.h.

3578 {
3579 // Bilinear and bicubic filters are both separable, so we produce independent contributions
3580 // from x and y, multiplying them together here to get each pixel's total scale factor.
3581 auto scale = sk_unaligned_load<F>(c->scalex)
3582 * sk_unaligned_load<F>(c->scaley);
3583 dr = mad(scale, r, dr);
3584 dg = mad(scale, g, dg);
3585 db = mad(scale, b, db);
3586 da = mad(scale, a, da);
3587}

◆ STAGE() [2/188]

SK_OPTS_NS::STAGE ( alpha_to_gray  ,
NoCtx   
)

Definition at line 3307 of file SkRasterPipeline_opts.h.

3307 {
3308 r = g = b = a;
3309 a = F1;
3310}
static constexpr F F1

◆ STAGE() [3/188]

SK_OPTS_NS::STAGE ( alpha_to_gray_dst  ,
NoCtx   
)

Definition at line 3311 of file SkRasterPipeline_opts.h.

3311 {
3312 dr = dg = db = da;
3313 da = F1;
3314}

◆ STAGE() [4/188]

SK_OPTS_NS::STAGE ( alpha_to_red  ,
NoCtx   
)

Definition at line 3315 of file SkRasterPipeline_opts.h.

3315 {
3316 r = a;
3317 a = F1;
3318}

◆ STAGE() [5/188]

SK_OPTS_NS::STAGE ( alpha_to_red_dst  ,
NoCtx   
)

Definition at line 3319 of file SkRasterPipeline_opts.h.

3319 {
3320 dr = da;
3321 da = F1;
3322}

◆ STAGE() [6/188]

SK_OPTS_NS::STAGE ( alter_2pt_conical_compensate_focal  ,
const SkRasterPipeline_2PtConicalCtx ctx 
)

Definition at line 3532 of file SkRasterPipeline_opts.h.

3532 {
3533 F& t = r;
3534 t = t + ctx->fP1; // ctx->fP1 = f
3535}

◆ STAGE() [7/188]

SK_OPTS_NS::STAGE ( alter_2pt_conical_unswap  ,
NoCtx   
)

Definition at line 3537 of file SkRasterPipeline_opts.h.

3537 {
3538 F& t = r;
3539 t = 1 - t;
3540}

◆ STAGE() [8/188]

SK_OPTS_NS::STAGE ( apply_vector_mask  ,
const uint32_t *  ctx 
)

Definition at line 3556 of file SkRasterPipeline_opts.h.

3556 {
3557 const U32 mask = sk_unaligned_load<U32>(ctx);
3558 r = sk_bit_cast<F>(sk_bit_cast<U32>(r) & mask);
3559 g = sk_bit_cast<F>(sk_bit_cast<U32>(g) & mask);
3560 b = sk_bit_cast<F>(sk_bit_cast<U32>(b) & mask);
3561 a = sk_bit_cast<F>(sk_bit_cast<U32>(a) & mask);
3562}

◆ STAGE() [9/188]

SK_OPTS_NS::STAGE ( bicubic_clamp_8888  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 4994 of file SkRasterPipeline_opts.h.

4994 {
4995 // (cx,cy) are the center of our sample.
4996 F cx = r,
4997 cy = g;
4998
4999 // All sample points are at the same fractional offset (fx,fy).
5000 // They're the 4 corners of a logical 1x1 pixel surrounding (x,y) at (0.5,0.5) offsets.
5001 F fx = fract(cx + 0.5f),
5002 fy = fract(cy + 0.5f);
5003
5004 // We'll accumulate the color of all four samples into {r,g,b,a} directly.
5005 r = g = b = a = F0;
5006
5007 const float* w = ctx->weights;
5008 const F scaley[4] = {bicubic_wts(fy, w[0], w[4], w[ 8], w[12]),
5009 bicubic_wts(fy, w[1], w[5], w[ 9], w[13]),
5010 bicubic_wts(fy, w[2], w[6], w[10], w[14]),
5011 bicubic_wts(fy, w[3], w[7], w[11], w[15])};
5012 const F scalex[4] = {bicubic_wts(fx, w[0], w[4], w[ 8], w[12]),
5013 bicubic_wts(fx, w[1], w[5], w[ 9], w[13]),
5014 bicubic_wts(fx, w[2], w[6], w[10], w[14]),
5015 bicubic_wts(fx, w[3], w[7], w[11], w[15])};
5016
5017 F sample_y = cy - 1.5f;
5018 for (int yy = 0; yy <= 3; ++yy) {
5019 F sample_x = cx - 1.5f;
5020 for (int xx = 0; xx <= 3; ++xx) {
5021 F scale = scalex[xx] * scaley[yy];
5022
5023 // ix_and_ptr() will clamp to the image's bounds for us.
5024 const uint32_t* ptr;
5025 U32 ix = ix_and_ptr(&ptr, ctx, sample_x, sample_y);
5026
5027 F sr,sg,sb,sa;
5028 from_8888(gather(ptr, ix), &sr,&sg,&sb,&sa);
5029
5030 r = mad(scale, sr, r);
5031 g = mad(scale, sg, g);
5032 b = mad(scale, sb, b);
5033 a = mad(scale, sa, a);
5034
5035 sample_x += 1;
5036 }
5037 sample_y += 1;
5038 }
5039}
SI void from_8888(U32 _8888, F *r, F *g, F *b, F *a)
SI U32 ix_and_ptr(T **ptr, const SkRasterPipeline_GatherCtx *ctx, F x, F y)
SI F bicubic_wts(F t, float A, float B, float C, float D)

◆ STAGE() [10/188]

SK_OPTS_NS::STAGE ( bicubic_n1x  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3682 of file SkRasterPipeline_opts.h.

3682{ bicubic_x<-1>(ctx, &r); }
SI void bicubic_x(SkRasterPipeline_SamplerCtx *ctx, F *x)

◆ STAGE() [11/188]

SK_OPTS_NS::STAGE ( bicubic_n1y  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3687 of file SkRasterPipeline_opts.h.

3687{ bicubic_y<-1>(ctx, &g); }
SI void bicubic_y(SkRasterPipeline_SamplerCtx *ctx, F *y)

◆ STAGE() [12/188]

SK_OPTS_NS::STAGE ( bicubic_n3x  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3681 of file SkRasterPipeline_opts.h.

3681{ bicubic_x<-3>(ctx, &r); }

◆ STAGE() [13/188]

SK_OPTS_NS::STAGE ( bicubic_n3y  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3686 of file SkRasterPipeline_opts.h.

3686{ bicubic_y<-3>(ctx, &g); }

◆ STAGE() [14/188]

SK_OPTS_NS::STAGE ( bicubic_p1x  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3683 of file SkRasterPipeline_opts.h.

3683{ bicubic_x<+1>(ctx, &r); }

◆ STAGE() [15/188]

SK_OPTS_NS::STAGE ( bicubic_p1y  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3688 of file SkRasterPipeline_opts.h.

3688{ bicubic_y<+1>(ctx, &g); }

◆ STAGE() [16/188]

SK_OPTS_NS::STAGE ( bicubic_p3x  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3684 of file SkRasterPipeline_opts.h.

3684{ bicubic_x<+3>(ctx, &r); }

◆ STAGE() [17/188]

SK_OPTS_NS::STAGE ( bicubic_p3y  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3689 of file SkRasterPipeline_opts.h.

3689{ bicubic_y<+3>(ctx, &g); }

◆ STAGE() [18/188]

SK_OPTS_NS::STAGE ( bicubic_setup  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3660 of file SkRasterPipeline_opts.h.

3660 {
3661 save_xy(&r, &g, ctx);
3662
3663 const float* w = ctx->weights;
3664
3665 F fx = sk_unaligned_load<F>(ctx->fx);
3666 sk_unaligned_store(ctx->wx[0], bicubic_wts(fx, w[0], w[4], w[ 8], w[12]));
3667 sk_unaligned_store(ctx->wx[1], bicubic_wts(fx, w[1], w[5], w[ 9], w[13]));
3668 sk_unaligned_store(ctx->wx[2], bicubic_wts(fx, w[2], w[6], w[10], w[14]));
3669 sk_unaligned_store(ctx->wx[3], bicubic_wts(fx, w[3], w[7], w[11], w[15]));
3670
3671 F fy = sk_unaligned_load<F>(ctx->fy);
3672 sk_unaligned_store(ctx->wy[0], bicubic_wts(fy, w[0], w[4], w[ 8], w[12]));
3673 sk_unaligned_store(ctx->wy[1], bicubic_wts(fy, w[1], w[5], w[ 9], w[13]));
3674 sk_unaligned_store(ctx->wy[2], bicubic_wts(fy, w[2], w[6], w[10], w[14]));
3675 sk_unaligned_store(ctx->wy[3], bicubic_wts(fy, w[3], w[7], w[11], w[15]));
3676
3677 // Init for accumulate
3678 dr = dg = db = da = F0;
3679}
SI void save_xy(F *r, F *g, SkRasterPipeline_SamplerCtx *c)

◆ STAGE() [19/188]

SK_OPTS_NS::STAGE ( bilerp_clamp_8888  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 4952 of file SkRasterPipeline_opts.h.

4952 {
4953 // (cx,cy) are the center of our sample.
4954 F cx = r,
4955 cy = g;
4956
4957 // All sample points are at the same fractional offset (fx,fy).
4958 // They're the 4 corners of a logical 1x1 pixel surrounding (x,y) at (0.5,0.5) offsets.
4959 F fx = fract(cx + 0.5f),
4960 fy = fract(cy + 0.5f);
4961
4962 // We'll accumulate the color of all four samples into {r,g,b,a} directly.
4963 r = g = b = a = F0;
4964
4965 for (float py = -0.5f; py <= +0.5f; py += 1.0f)
4966 for (float px = -0.5f; px <= +0.5f; px += 1.0f) {
4967 // (x,y) are the coordinates of this sample point.
4968 F x = cx + px,
4969 y = cy + py;
4970
4971 // ix_and_ptr() will clamp to the image's bounds for us.
4972 const uint32_t* ptr;
4973 U32 ix = ix_and_ptr(&ptr, ctx, x,y);
4974
4975 F sr,sg,sb,sa;
4976 from_8888(gather(ptr, ix), &sr,&sg,&sb,&sa);
4977
4978 // In bilinear interpolation, the 4 pixels at +/- 0.5 offsets from the sample pixel center
4979 // are combined in direct proportion to their area overlapping that logical query pixel.
4980 // At positive offsets, the x-axis contribution to that rectangle is fx,
4981 // or (1-fx) at negative x. Same deal for y.
4982 F sx = (px > 0) ? fx : 1.0f - fx,
4983 sy = (py > 0) ? fy : 1.0f - fy,
4984 area = sx * sy;
4985
4986 r += sr * area;
4987 g += sg * area;
4988 b += sb * area;
4989 a += sa * area;
4990 }
4991}

◆ STAGE() [20/188]

SK_OPTS_NS::STAGE ( bilinear_nx  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3621 of file SkRasterPipeline_opts.h.

3621{ bilinear_x<-1>(ctx, &r); }
SI void bilinear_x(SkRasterPipeline_SamplerCtx *ctx, F *x)

◆ STAGE() [21/188]

SK_OPTS_NS::STAGE ( bilinear_ny  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3623 of file SkRasterPipeline_opts.h.

3623{ bilinear_y<-1>(ctx, &g); }
SI void bilinear_y(SkRasterPipeline_SamplerCtx *ctx, F *y)

◆ STAGE() [22/188]

SK_OPTS_NS::STAGE ( bilinear_px  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3622 of file SkRasterPipeline_opts.h.

3622{ bilinear_x<+1>(ctx, &r); }

◆ STAGE() [23/188]

SK_OPTS_NS::STAGE ( bilinear_py  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3624 of file SkRasterPipeline_opts.h.

3624{ bilinear_y<+1>(ctx, &g); }

◆ STAGE() [24/188]

SK_OPTS_NS::STAGE ( bilinear_setup  ,
SkRasterPipeline_SamplerCtx ctx 
)

Definition at line 3615 of file SkRasterPipeline_opts.h.

3615 {
3616 save_xy(&r, &g, ctx);
3617 // Init for accumulate
3618 dr = dg = db = da = F0;
3619}

◆ STAGE() [25/188]

SK_OPTS_NS::STAGE ( black_color  ,
NoCtx   
)

Definition at line 2105 of file SkRasterPipeline_opts.h.

2105 {
2106 r = g = b = F0;
2107 a = F1;
2108}

◆ STAGE() [26/188]

SK_OPTS_NS::STAGE ( bt709_luminance_or_luma_to_alpha  ,
NoCtx   
)

Definition at line 3324 of file SkRasterPipeline_opts.h.

3324 {
3325 a = r*0.2126f + g*0.7152f + b*0.0722f;
3326 r = g = b = F0;
3327}

◆ STAGE() [27/188]

SK_OPTS_NS::STAGE ( bt709_luminance_or_luma_to_rgb  ,
NoCtx   
)

Definition at line 3328 of file SkRasterPipeline_opts.h.

3328 {
3329 r = g = b = r*0.2126f + g*0.7152f + b*0.0722f;
3330}

◆ STAGE() [28/188]

SK_OPTS_NS::STAGE ( byte_tables  ,
const SkRasterPipeline_TablesCtx tables 
)

Definition at line 2712 of file SkRasterPipeline_opts.h.

2712 {
2713 r = from_byte(gather(tables->r, to_unorm(r, 255)));
2714 g = from_byte(gather(tables->g, to_unorm(g, 255)));
2715 b = from_byte(gather(tables->b, to_unorm(b, 255)));
2716 a = from_byte(gather(tables->a, to_unorm(a, 255)));
2717}
SI U32 to_unorm(F v, float scale, float bias=1.0f)

◆ STAGE() [29/188]

SK_OPTS_NS::STAGE ( callback  ,
SkRasterPipeline_CallbackCtx c 
)

Definition at line 3831 of file SkRasterPipeline_opts.h.

3831 {
3832 store4(c->rgba, r,g,b,a);
3833 c->fn(c, N);
3834 load4(c->read_from, &r,&g,&b,&a);
3835}
SI void load4(const float *ptr, F *r, F *g, F *b, F *a)
SI void store4(float *ptr, F r, F g, F b, F a)
float rgba[4 *SkRasterPipeline_kMaxStride_highp]
void(* fn)(SkRasterPipeline_CallbackCtx *self, int active_pixels)

◆ STAGE() [30/188]

SK_OPTS_NS::STAGE ( check_decal_mask  ,
SkRasterPipeline_DecalTileCtx ctx 
)

Definition at line 3299 of file SkRasterPipeline_opts.h.

3299 {
3300 auto mask = sk_unaligned_load<U32>(ctx->mask);
3301 r = sk_bit_cast<F>(sk_bit_cast<U32>(r) & mask);
3302 g = sk_bit_cast<F>(sk_bit_cast<U32>(g) & mask);
3303 b = sk_bit_cast<F>(sk_bit_cast<U32>(b) & mask);
3304 a = sk_bit_cast<F>(sk_bit_cast<U32>(a) & mask);
3305}
uint32_t mask[SkRasterPipeline_kMaxStride]

◆ STAGE() [31/188]

SK_OPTS_NS::STAGE ( clamp_01  ,
NoCtx   
)

Definition at line 2373 of file SkRasterPipeline_opts.h.

2373 {
2374 r = clamp_01_(r);
2375 g = clamp_01_(g);
2376 b = clamp_01_(b);
2377 a = clamp_01_(a);
2378}

◆ STAGE() [32/188]

SK_OPTS_NS::STAGE ( clamp_gamut  ,
NoCtx   
)

Definition at line 2380 of file SkRasterPipeline_opts.h.

2380 {
2381 a = min(max(a, 0.0f), 1.0f);
2382 r = min(max(r, 0.0f), a);
2383 g = min(max(g, 0.0f), a);
2384 b = min(max(b, 0.0f), a);
2385}

◆ STAGE() [33/188]

SK_OPTS_NS::STAGE ( clamp_x_1  ,
NoCtx   
)

Definition at line 3263 of file SkRasterPipeline_opts.h.

3263{ r = clamp_01_(r); }

◆ STAGE() [34/188]

SK_OPTS_NS::STAGE ( clamp_x_and_y  ,
const SkRasterPipeline_CoordClampCtx ctx 
)

◆ STAGE() [35/188]

SK_OPTS_NS::STAGE ( color  ,
NoCtx   
)

Definition at line 2319 of file SkRasterPipeline_opts.h.

2319 {
2320 F R = r*da,
2321 G = g*da,
2322 B = b*da;
2323
2324 set_lum(&R, &G, &B, lum(dr,dg,db)*a);
2325 clip_color(&R,&G,&B, a*da);
2326
2327 r = mad(r, inv(da), mad(dr, inv(a), R));
2328 g = mad(g, inv(da), mad(dg, inv(a), G));
2329 b = mad(b, inv(da), mad(db, inv(a), B));
2330 a = a + nmad(a, da, da);
2331}
static void clip_color(float *r, float *g, float *b)
Definition: hsl.cpp:68
static void set_lum(float *r, float *g, float *b, float l)
Definition: hsl.cpp:83
#define R(r)
Definition: SkMD5.cpp:125

◆ STAGE() [36/188]

SK_OPTS_NS::STAGE ( css_hcl_to_lab  ,
NoCtx   
)

Definition at line 2552 of file SkRasterPipeline_opts.h.

2552 {
2553 F H = r,
2554 C = g,
2555 L = b;
2556
2557 F hueRadians = H * (SK_FloatPI / 180);
2558
2559 r = L;
2560 g = C * cos_(hueRadians);
2561 b = C * sin_(hueRadians);
2562}
Definition: SkMD5.cpp:130

◆ STAGE() [37/188]

SK_OPTS_NS::STAGE ( css_hsl_to_srgb  ,
NoCtx   
)

Definition at line 2589 of file SkRasterPipeline_opts.h.

2589 {
2590 RGB rgb = css_hsl_to_srgb_(r, g, b);
2591 r = rgb.r;
2592 g = rgb.g;
2593 b = rgb.b;
2594}
SI RGB css_hsl_to_srgb_(F h, F s, F l)

◆ STAGE() [38/188]

SK_OPTS_NS::STAGE ( css_hwb_to_srgb  ,
NoCtx   
)

Definition at line 2596 of file SkRasterPipeline_opts.h.

2596 {
2597 g *= 0.01f;
2598 b *= 0.01f;
2599
2600 F gray = g / (g + b);
2601
2602 RGB rgb = css_hsl_to_srgb_(r, F_(100.0f), F_(50.0f));
2603 rgb.r = rgb.r * (1 - g - b) + g;
2604 rgb.g = rgb.g * (1 - g - b) + g;
2605 rgb.b = rgb.b * (1 - g - b) + g;
2606
2607 auto isGray = (g + b) >= 1;
2608
2609 r = if_then_else(isGray, gray, rgb.r);
2610 g = if_then_else(isGray, gray, rgb.g);
2611 b = if_then_else(isGray, gray, rgb.b);
2612}

◆ STAGE() [39/188]

SK_OPTS_NS::STAGE ( css_lab_to_xyz  ,
NoCtx   
)

Definition at line 2496 of file SkRasterPipeline_opts.h.

2496 {
2497 constexpr float k = 24389 / 27.0f;
2498 constexpr float e = 216 / 24389.0f;
2499
2500 F f[3];
2501 f[1] = (r + 16) * (1 / 116.0f);
2502 f[0] = (g * (1 / 500.0f)) + f[1];
2503 f[2] = f[1] - (b * (1 / 200.0f));
2504
2505 F f_cubed[3] = { f[0]*f[0]*f[0], f[1]*f[1]*f[1], f[2]*f[2]*f[2] };
2506
2507 F xyz[3] = {
2508 if_then_else(f_cubed[0] > e, f_cubed[0], (116 * f[0] - 16) * (1 / k)),
2509 if_then_else(r > k * e, f_cubed[1], r * (1 / k)),
2510 if_then_else(f_cubed[2] > e, f_cubed[2], (116 * f[2] - 16) * (1 / k))
2511 };
2512
2513 constexpr float D50[3] = { 0.3457f / 0.3585f, 1.0f, (1.0f - 0.3457f - 0.3585f) / 0.3585f };
2514 r = xyz[0]*D50[0];
2515 g = xyz[1]*D50[1];
2516 b = xyz[2]*D50[2];
2517}

◆ STAGE() [40/188]

SK_OPTS_NS::STAGE ( css_oklab_gamut_map_to_linear_srgb  ,
NoCtx   
)

Definition at line 2533 of file SkRasterPipeline_opts.h.

2533 {
2534 // TODO(https://crbug.com/1508329): Add support for gamut mapping.
2535 // Return a greyscale value, so that accidental use is obvious.
2536 F l_ = r,
2537 m_ = r,
2538 s_ = r;
2539
2540 F l = l_*l_*l_,
2541 m = m_*m_*m_,
2542 s = s_*s_*s_;
2543
2544 r = +4.0767416621f * l - 3.3077115913f * m + 0.2309699292f * s;
2545 g = -1.2684380046f * l + 2.6097574011f * m - 0.3413193965f * s;
2546 b = -0.0041960863f * l - 0.7034186147f * m + 1.7076147010f * s;
2547}

◆ STAGE() [41/188]

SK_OPTS_NS::STAGE ( css_oklab_to_linear_srgb  ,
NoCtx   
)

Definition at line 2519 of file SkRasterPipeline_opts.h.

2519 {
2520 F l_ = r + 0.3963377774f * g + 0.2158037573f * b,
2521 m_ = r - 0.1055613458f * g - 0.0638541728f * b,
2522 s_ = r - 0.0894841775f * g - 1.2914855480f * b;
2523
2524 F l = l_*l_*l_,
2525 m = m_*m_*m_,
2526 s = s_*s_*s_;
2527
2528 r = +4.0767416621f * l - 3.3077115913f * m + 0.2309699292f * s;
2529 g = -1.2684380046f * l + 2.6097574011f * m - 0.3413193965f * s;
2530 b = -0.0041960863f * l - 0.7034186147f * m + 1.7076147010f * s;
2531}

◆ STAGE() [42/188]

SK_OPTS_NS::STAGE ( decal_x  ,
SkRasterPipeline_DecalTileCtx ctx 
)

Definition at line 3278 of file SkRasterPipeline_opts.h.

3278 {
3279 auto w = ctx->limit_x;
3280 auto e = ctx->inclusiveEdge_x;
3281 auto cond = ((0 < r) & (r < w)) | (r == e);
3282 sk_unaligned_store(ctx->mask, cond_to_mask(cond));
3283}

◆ STAGE() [43/188]

SK_OPTS_NS::STAGE ( decal_x_and_y  ,
SkRasterPipeline_DecalTileCtx ctx 
)

Definition at line 3290 of file SkRasterPipeline_opts.h.

3290 {
3291 auto w = ctx->limit_x;
3292 auto h = ctx->limit_y;
3293 auto ex = ctx->inclusiveEdge_x;
3294 auto ey = ctx->inclusiveEdge_y;
3295 auto cond = (((0 < r) & (r < w)) | (r == ex))
3296 & (((0 < g) & (g < h)) | (g == ey));
3297 sk_unaligned_store(ctx->mask, cond_to_mask(cond));
3298}

◆ STAGE() [44/188]

SK_OPTS_NS::STAGE ( decal_y  ,
SkRasterPipeline_DecalTileCtx ctx 
)

Definition at line 3284 of file SkRasterPipeline_opts.h.

3284 {
3285 auto h = ctx->limit_y;
3286 auto e = ctx->inclusiveEdge_y;
3287 auto cond = ((0 < g) & (g < h)) | (g == e);
3288 sk_unaligned_store(ctx->mask, cond_to_mask(cond));
3289}

◆ STAGE() [45/188]

SK_OPTS_NS::STAGE ( dither  ,
const float *  rate 
)

Definition at line 2049 of file SkRasterPipeline_opts.h.

2049 {
2050 // Get [(dx,dy), (dx+1,dy), (dx+2,dy), ...] loaded up in integer vectors.
2051 uint32_t iota[] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
2052 static_assert(std::size(iota) >= SkRasterPipeline_kMaxStride_highp);
2053
2054 U32 X = U32_(dx) + sk_unaligned_load<U32>(iota),
2055 Y = U32_(dy);
2056
2057 // We're doing 8x8 ordered dithering, see https://en.wikipedia.org/wiki/Ordered_dithering.
2058 // In this case n=8 and we're using the matrix that looks like 1/64 x [ 0 48 12 60 ... ].
2059
2060 // We only need X and X^Y from here on, so it's easier to just think of that as "Y".
2061 Y ^= X;
2062
2063 // We'll mix the bottom 3 bits of each of X and Y to make 6 bits,
2064 // for 2^6 == 64 == 8x8 matrix values. If X=abc and Y=def, we make fcebda.
2065 U32 M = (Y & 1) << 5 | (X & 1) << 4
2066 | (Y & 2) << 2 | (X & 2) << 1
2067 | (Y & 4) >> 1 | (X & 4) >> 2;
2068
2069 // Scale that dither to [0,1), then (-0.5,+0.5), here using 63/128 = 0.4921875 as 0.5-epsilon.
2070 // We want to make sure our dither is less than 0.5 in either direction to keep exact values
2071 // like 0 and 1 unchanged after rounding.
2072 F dither = mad(cast(M), 2/128.0f, -63/128.0f);
2073
2074 r = mad(dither, *rate, r);
2075 g = mad(dither, *rate, g);
2076 b = mad(dither, *rate, b);
2077
2078 r = max(0.0f, min(r, a));
2079 g = max(0.0f, min(g, a));
2080 b = max(0.0f, min(b, a));
2081}
static constexpr int SkRasterPipeline_kMaxStride_highp
#define M(st)
static const SkScalar Y
Definition: StrokeBench.cpp:55
static const SkScalar X
Definition: StrokeBench.cpp:54
SI constexpr U32 U32_(uint32_t x)
it will be possible to load the file into Perfetto s trace viewer disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive keep the shell running after the Dart script has completed enable serial On low power devices with low core running concurrent GC tasks on threads can cause them to contend with the UI thread which could potentially lead to jank This option turns off all concurrent GC activities domain network JSON encoded network policy per domain This overrides the DisallowInsecureConnections switch Embedder can specify whether to allow or disallow insecure connections at a domain level old gen heap size
Definition: switches.h:259

◆ STAGE() [46/188]

SK_OPTS_NS::STAGE ( emboss  ,
const SkRasterPipeline_EmbossCtx ctx 
)

Definition at line 2700 of file SkRasterPipeline_opts.h.

2700 {
2701 auto mptr = ptr_at_xy<const uint8_t>(&ctx->mul, dx,dy),
2702 aptr = ptr_at_xy<const uint8_t>(&ctx->add, dx,dy);
2703
2704 F mul = from_byte(load<U8>(mptr)),
2705 add = from_byte(load<U8>(aptr));
2706
2707 r = mad(r, mul, add);
2708 g = mad(g, mul, add);
2709 b = mad(b, mul, add);
2710}
SkRasterPipeline_MemoryCtx add
SkRasterPipeline_MemoryCtx mul

◆ STAGE() [47/188]

SK_OPTS_NS::STAGE ( evenly_spaced_2_stop_gradient  ,
const SkRasterPipeline_EvenlySpaced2StopGradientCtx c 
)

Definition at line 3463 of file SkRasterPipeline_opts.h.

3463 {
3464 auto t = r;
3465 r = mad(t, c->f[0], c->b[0]);
3466 g = mad(t, c->f[1], c->b[1]);
3467 b = mad(t, c->f[2], c->b[2]);
3468 a = mad(t, c->f[3], c->b[3]);
3469}

◆ STAGE() [48/188]

SK_OPTS_NS::STAGE ( evenly_spaced_gradient  ,
const SkRasterPipeline_GradientCtx c 
)

Definition at line 3445 of file SkRasterPipeline_opts.h.

3445 {
3446 auto t = r;
3447 auto idx = trunc_(t * static_cast<float>(c->stopCount-1));
3448 gradient_lookup(c, idx, t, &r, &g, &b, &a);
3449}
SI void gradient_lookup(const SkRasterPipeline_GradientCtx *c, U32 idx, F t, F *r, F *g, F *b, F *a)

◆ STAGE() [49/188]

SK_OPTS_NS::STAGE ( force_opaque  ,
NoCtx   
)

Definition at line 2453 of file SkRasterPipeline_opts.h.

2453{ a = F1; }

◆ STAGE() [50/188]

SK_OPTS_NS::STAGE ( force_opaque_dst  ,
NoCtx   
)

Definition at line 2454 of file SkRasterPipeline_opts.h.

2454{ da = F1; }

◆ STAGE() [51/188]

SK_OPTS_NS::STAGE ( gamma_  ,
const float *  G 
)

Definition at line 2743 of file SkRasterPipeline_opts.h.

2743 {
2744 auto fn = [&](F v) {
2745 U32 sign;
2746 v = strip_sign(v, &sign);
2747 return apply_sign(approx_powf(v, *G), sign);
2748 };
2749 r = fn(r);
2750 g = fn(g);
2751 b = fn(b);
2752}
SI F apply_sign(F x, U32 sign)
SI F strip_sign(F x, U32 *sign)

◆ STAGE() [52/188]

SK_OPTS_NS::STAGE ( gather_10101010_xr  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 3062 of file SkRasterPipeline_opts.h.

3062 {
3063 const uint64_t* ptr;
3064 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
3065 from_10101010_xr(gather(ptr, ix), &r, &g, &b, &a);
3066}
SI void from_10101010_xr(U64 _10x6, F *r, F *g, F *b, F *a)

◆ STAGE() [53/188]

SK_OPTS_NS::STAGE ( gather_1010102  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 3052 of file SkRasterPipeline_opts.h.

3052 {
3053 const uint32_t* ptr;
3054 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
3055 from_1010102(gather(ptr, ix), &r,&g,&b,&a);
3056}
SI void from_1010102(U32 rgba, F *r, F *g, F *b, F *a)

◆ STAGE() [54/188]

SK_OPTS_NS::STAGE ( gather_1010102_xr  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 3057 of file SkRasterPipeline_opts.h.

3057 {
3058 const uint32_t* ptr;
3059 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
3060 from_1010102_xr(gather(ptr, ix), &r,&g,&b,&a);
3061}
SI void from_1010102_xr(U32 rgba, F *r, F *g, F *b, F *a)

◆ STAGE() [55/188]

SK_OPTS_NS::STAGE ( gather_10x6  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 3019 of file SkRasterPipeline_opts.h.

3019 {
3020 const uint64_t* ptr;
3021 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
3022 from_10x6(gather(ptr, ix), &r, &g, &b, &a);
3023}
SI void from_10x6(U64 _10x6, F *r, F *g, F *b, F *a)

◆ STAGE() [56/188]

SK_OPTS_NS::STAGE ( gather_16161616  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 2995 of file SkRasterPipeline_opts.h.

2995 {
2996 const uint64_t* ptr;
2997 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2998 from_16161616(gather(ptr, ix), &r, &g, &b, &a);
2999}
SI void from_16161616(U64 _16161616, F *r, F *g, F *b, F *a)

◆ STAGE() [57/188]

SK_OPTS_NS::STAGE ( gather_4444  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 2875 of file SkRasterPipeline_opts.h.

2875 {
2876 const uint16_t* ptr;
2877 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2878 from_4444(gather(ptr, ix), &r,&g,&b,&a);
2879}
SI void from_4444(U16 _4444, F *r, F *g, F *b, F *a)

◆ STAGE() [58/188]

SK_OPTS_NS::STAGE ( gather_565  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 2852 of file SkRasterPipeline_opts.h.

2852 {
2853 const uint16_t* ptr;
2854 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2855 from_565(gather(ptr, ix), &r,&g,&b);
2856 a = F1;
2857}
SI void from_565(U16 _565, F *r, F *g, F *b)

◆ STAGE() [59/188]

SK_OPTS_NS::STAGE ( gather_8888  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 2897 of file SkRasterPipeline_opts.h.

2897 {
2898 const uint32_t* ptr;
2899 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2900 from_8888(gather(ptr, ix), &r,&g,&b,&a);
2901}

◆ STAGE() [60/188]

SK_OPTS_NS::STAGE ( gather_a16  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 2947 of file SkRasterPipeline_opts.h.

2947 {
2948 const uint16_t* ptr;
2949 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2950 r = g = b = F0;
2951 a = from_short(gather(ptr, ix));
2952}
SI F from_short(U16 s)

◆ STAGE() [61/188]

SK_OPTS_NS::STAGE ( gather_a8  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 2821 of file SkRasterPipeline_opts.h.

2821 {
2822 const uint8_t* ptr;
2823 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2824 r = g = b = F0;
2825 a = from_byte(gather(ptr, ix));
2826}

◆ STAGE() [62/188]

SK_OPTS_NS::STAGE ( gather_af16  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 3165 of file SkRasterPipeline_opts.h.

3165 {
3166 const uint16_t* ptr;
3167 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
3168 r = g = b = F0;
3169 a = from_half(gather(ptr, ix));
3170}
SI F from_half(U16 h)

◆ STAGE() [63/188]

SK_OPTS_NS::STAGE ( gather_f16  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 3129 of file SkRasterPipeline_opts.h.

3129 {
3130 const uint64_t* ptr;
3131 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
3132 auto px = gather(ptr, ix);
3133
3134 U16 R,G,B,A;
3135 load4((const uint16_t*)&px, &R,&G,&B,&A);
3136 r = from_half(R);
3137 g = from_half(G);
3138 b = from_half(B);
3139 a = from_half(A);
3140}

◆ STAGE() [64/188]

SK_OPTS_NS::STAGE ( gather_f32  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 3222 of file SkRasterPipeline_opts.h.

3222 {
3223 const float* ptr;
3224 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
3225 r = gather(ptr, 4*ix + 0);
3226 g = gather(ptr, 4*ix + 1);
3227 b = gather(ptr, 4*ix + 2);
3228 a = gather(ptr, 4*ix + 3);
3229}

◆ STAGE() [65/188]

SK_OPTS_NS::STAGE ( gather_rg1616  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 2972 of file SkRasterPipeline_opts.h.

2972 {
2973 const uint32_t* ptr;
2974 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2975 from_1616(gather(ptr, ix), &r, &g);
2976 b = F0;
2977 a = F1;
2978}
SI void from_1616(U32 _1616, F *r, F *g)

◆ STAGE() [66/188]

SK_OPTS_NS::STAGE ( gather_rg88  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 2924 of file SkRasterPipeline_opts.h.

2924 {
2925 const uint16_t* ptr;
2926 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2927 from_88(gather(ptr, ix), &r, &g);
2928 b = F0;
2929 a = F1;
2930}
SI void from_88(U16 _88, F *r, F *g)

◆ STAGE() [67/188]

SK_OPTS_NS::STAGE ( gather_rgf16  ,
const SkRasterPipeline_GatherCtx ctx 
)

Definition at line 3196 of file SkRasterPipeline_opts.h.

3196 {
3197 const uint32_t* ptr;
3198 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
3199 auto px = gather(ptr, ix);
3200
3201 U16 R,G;
3202 load2((const uint16_t*)&px, &R, &G);
3203 r = from_half(R);
3204 g = from_half(G);
3205 b = F0;
3206 a = F1;
3207}
SI void load2(const uint16_t *ptr, U16 *r, U16 *g)

◆ STAGE() [68/188]

SK_OPTS_NS::STAGE ( gauss_a_to_rgba  ,
NoCtx   
)

Definition at line 4935 of file SkRasterPipeline_opts.h.

4935 {
4936 // x = 1 - x;
4937 // exp(-x * x * 4) - 0.018f;
4938 // ... now approximate with quartic
4939 //
4940 const float c4 = -2.26661229133605957031f;
4941 const float c3 = 2.89795351028442382812f;
4942 const float c2 = 0.21345567703247070312f;
4943 const float c1 = 0.15489584207534790039f;
4944 const float c0 = 0.00030726194381713867f;
4945 a = mad(a, mad(a, mad(a, mad(a, c4, c3), c2), c1), c0);
4946 r = a;
4947 g = a;
4948 b = a;
4949}

◆ STAGE() [69/188]

SK_OPTS_NS::STAGE ( gradient  ,
const SkRasterPipeline_GradientCtx c 
)

Definition at line 3451 of file SkRasterPipeline_opts.h.

3451 {
3452 auto t = r;
3453 U32 idx = U32_(0);
3454
3455 // N.B. The loop starts at 1 because idx 0 is the color to use before the first stop.
3456 for (size_t i = 1; i < c->stopCount; i++) {
3457 idx += (U32)if_then_else(t >= c->ts[i], I32_(1), I32_(0));
3458 }
3459
3460 gradient_lookup(c, idx, t, &r, &g, &b, &a);
3461}
SI constexpr I32 I32_(int32_t x)

◆ STAGE() [70/188]

SK_OPTS_NS::STAGE ( HLGinvish  ,
const skcms_TransferFunction ctx 
)

Definition at line 2789 of file SkRasterPipeline_opts.h.

2789 {
2790 auto fn = [&](F v) {
2791 U32 sign;
2792 v = strip_sign(v, &sign);
2793
2794 const float R = ctx->a, G = ctx->b,
2795 a = ctx->c, b = ctx->d, c = ctx->e,
2796 K = ctx->f + 1.0f;
2797
2798 v /= K;
2799 F r = if_then_else(v <= 1, R * approx_powf(v, G)
2800 , a * approx_log(v - b) + c);
2801
2802 return apply_sign(r, sign);
2803 };
2804 r = fn(r);
2805 g = fn(g);
2806 b = fn(b);
2807}
SI F approx_log(F x)
static const int K
Definition: daa.cpp:21

◆ STAGE() [71/188]

SK_OPTS_NS::STAGE ( HLGish  ,
const skcms_TransferFunction ctx 
)

Definition at line 2770 of file SkRasterPipeline_opts.h.

2770 {
2771 auto fn = [&](F v) {
2772 U32 sign;
2773 v = strip_sign(v, &sign);
2774
2775 const float R = ctx->a, G = ctx->b,
2776 a = ctx->c, b = ctx->d, c = ctx->e,
2777 K = ctx->f + 1.0f;
2778
2779 F r = if_then_else(v*R <= 1, approx_powf(v*R, G)
2780 , approx_exp((v-c)*a) + b);
2781
2782 return K * apply_sign(r, sign);
2783 };
2784 r = fn(r);
2785 g = fn(g);
2786 b = fn(b);
2787}
SI F approx_exp(F x)

◆ STAGE() [72/188]

SK_OPTS_NS::STAGE ( hsl_to_rgb  ,
NoCtx   
)

Definition at line 2476 of file SkRasterPipeline_opts.h.

2476 {
2477 // See GrRGBToHSLFilterEffect.fp
2478
2479 F h = r,
2480 s = g,
2481 l = b,
2482 c = (1.0f - abs_(2.0f * l - 1)) * s;
2483
2484 auto hue_to_rgb = [&](F hue) {
2485 F q = clamp_01_(abs_(fract(hue) * 6.0f - 3.0f) - 1.0f);
2486 return (q - 0.5f) * c + l;
2487 };
2488
2489 r = hue_to_rgb(h + 0.0f/3.0f);
2490 g = hue_to_rgb(h + 2.0f/3.0f);
2491 b = hue_to_rgb(h + 1.0f/3.0f);
2492}
static void hue(float dr, float dg, float db, float *sr, float *sg, float *sb)
Definition: hsl.cpp:92

◆ STAGE() [73/188]

SK_OPTS_NS::STAGE ( hue  ,
NoCtx   
)

Definition at line 2291 of file SkRasterPipeline_opts.h.

2291 {
2292 F R = r*a,
2293 G = g*a,
2294 B = b*a;
2295
2296 set_sat(&R, &G, &B, sat(dr,dg,db)*a);
2297 set_lum(&R, &G, &B, lum(dr,dg,db)*a);
2298 clip_color(&R,&G,&B, a*da);
2299
2300 r = mad(r, inv(da), mad(dr, inv(a), R));
2301 g = mad(g, inv(da), mad(dg, inv(a), G));
2302 b = mad(b, inv(da), mad(db, inv(a), B));
2303 a = a + nmad(a, da, da);
2304}
static void set_sat(float *r, float *g, float *b, float s)
Definition: hsl.cpp:57

◆ STAGE() [74/188]

SK_OPTS_NS::STAGE ( lerp_1_float  ,
const float *  c 
)

Definition at line 2655 of file SkRasterPipeline_opts.h.

2655 {
2656 r = lerp(dr, r, F_(*c));
2657 g = lerp(dg, g, F_(*c));
2658 b = lerp(db, b, F_(*c));
2659 a = lerp(da, a, F_(*c));
2660}

◆ STAGE() [75/188]

SK_OPTS_NS::STAGE ( lerp_565  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2686 of file SkRasterPipeline_opts.h.

2686 {
2687 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2688
2689 F cr,cg,cb;
2690 from_565(load<U16>(ptr), &cr, &cg, &cb);
2691
2692 F ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
2693
2694 r = lerp(dr, r, cr);
2695 g = lerp(dg, g, cg);
2696 b = lerp(db, b, cb);
2697 a = lerp(da, a, ca);
2698}
SI F alpha_coverage_from_rgb_coverage(F a, F da, F cr, F cg, F cb)

◆ STAGE() [76/188]

SK_OPTS_NS::STAGE ( lerp_native  ,
const float  scales[] 
)

Definition at line 2668 of file SkRasterPipeline_opts.h.

2668 {
2669 auto c = sk_unaligned_load<F>(scales);
2670 r = lerp(dr, r, c);
2671 g = lerp(dg, g, c);
2672 b = lerp(db, b, c);
2673 a = lerp(da, a, c);
2674}

◆ STAGE() [77/188]

SK_OPTS_NS::STAGE ( lerp_u8  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2675 of file SkRasterPipeline_opts.h.

2675 {
2676 auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
2677
2678 auto scales = load<U8>(ptr);
2679 auto c = from_byte(scales);
2680
2681 r = lerp(dr, r, c);
2682 g = lerp(dg, g, c);
2683 b = lerp(db, b, c);
2684 a = lerp(da, a, c);
2685}

◆ STAGE() [78/188]

SK_OPTS_NS::STAGE ( load_10101010_xr  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3067 of file SkRasterPipeline_opts.h.

3067 {
3068 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx, dy);
3069 from_10101010_xr(load<U64>(ptr), &r,&g, &b, &a);
3070}

◆ STAGE() [79/188]

SK_OPTS_NS::STAGE ( load_10101010_xr_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3071 of file SkRasterPipeline_opts.h.

3071 {
3072 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx, dy);
3073 from_10101010_xr(load<U64>(ptr), &dr, &dg, &db, &da);
3074}

◆ STAGE() [80/188]

SK_OPTS_NS::STAGE ( load_1010102  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3036 of file SkRasterPipeline_opts.h.

3036 {
3037 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
3038 from_1010102(load<U32>(ptr), &r,&g,&b,&a);
3039}

◆ STAGE() [81/188]

SK_OPTS_NS::STAGE ( load_1010102_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3040 of file SkRasterPipeline_opts.h.

3040 {
3041 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
3042 from_1010102(load<U32>(ptr), &dr,&dg,&db,&da);
3043}

◆ STAGE() [82/188]

SK_OPTS_NS::STAGE ( load_1010102_xr  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3044 of file SkRasterPipeline_opts.h.

3044 {
3045 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
3046 from_1010102_xr(load<U32>(ptr), &r,&g,&b,&a);
3047}

◆ STAGE() [83/188]

SK_OPTS_NS::STAGE ( load_1010102_xr_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3048 of file SkRasterPipeline_opts.h.

3048 {
3049 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
3050 from_1010102_xr(load<U32>(ptr), &dr,&dg,&db,&da);
3051}

◆ STAGE() [84/188]

SK_OPTS_NS::STAGE ( load_10x6  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3011 of file SkRasterPipeline_opts.h.

3011 {
3012 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx, dy);
3013 from_10x6(load<U64>(ptr), &r,&g, &b, &a);
3014}

◆ STAGE() [85/188]

SK_OPTS_NS::STAGE ( load_10x6_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3015 of file SkRasterPipeline_opts.h.

3015 {
3016 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx, dy);
3017 from_10x6(load<U64>(ptr), &dr, &dg, &db, &da);
3018}

◆ STAGE() [86/188]

SK_OPTS_NS::STAGE ( load_16161616  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2987 of file SkRasterPipeline_opts.h.

2987 {
2988 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx, dy);
2989 from_16161616(load<U64>(ptr), &r,&g, &b, &a);
2990}

◆ STAGE() [87/188]

SK_OPTS_NS::STAGE ( load_16161616_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2991 of file SkRasterPipeline_opts.h.

2991 {
2992 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx, dy);
2993 from_16161616(load<U64>(ptr), &dr, &dg, &db, &da);
2994}

◆ STAGE() [88/188]

SK_OPTS_NS::STAGE ( load_4444  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2867 of file SkRasterPipeline_opts.h.

2867 {
2868 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2869 from_4444(load<U16>(ptr), &r,&g,&b,&a);
2870}

◆ STAGE() [89/188]

SK_OPTS_NS::STAGE ( load_4444_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2871 of file SkRasterPipeline_opts.h.

2871 {
2872 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2873 from_4444(load<U16>(ptr), &dr,&dg,&db,&da);
2874}

◆ STAGE() [90/188]

SK_OPTS_NS::STAGE ( load_565  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2840 of file SkRasterPipeline_opts.h.

2840 {
2841 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2842
2843 from_565(load<U16>(ptr), &r,&g,&b);
2844 a = F1;
2845}

◆ STAGE() [91/188]

SK_OPTS_NS::STAGE ( load_565_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2846 of file SkRasterPipeline_opts.h.

2846 {
2847 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2848
2849 from_565(load<U16>(ptr), &dr,&dg,&db);
2850 da = F1;
2851}

◆ STAGE() [92/188]

SK_OPTS_NS::STAGE ( load_8888  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2889 of file SkRasterPipeline_opts.h.

2889 {
2890 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
2891 from_8888(load<U32>(ptr), &r,&g,&b,&a);
2892}

◆ STAGE() [93/188]

SK_OPTS_NS::STAGE ( load_8888_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2893 of file SkRasterPipeline_opts.h.

2893 {
2894 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
2895 from_8888(load<U32>(ptr), &dr,&dg,&db,&da);
2896}

◆ STAGE() [94/188]

SK_OPTS_NS::STAGE ( load_a16  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2937 of file SkRasterPipeline_opts.h.

2937 {
2938 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2939 r = g = b = F0;
2940 a = from_short(load<U16>(ptr));
2941}

◆ STAGE() [95/188]

SK_OPTS_NS::STAGE ( load_a16_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2942 of file SkRasterPipeline_opts.h.

2942 {
2943 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
2944 dr = dg = db = F0;
2945 da = from_short(load<U16>(ptr));
2946}

◆ STAGE() [96/188]

SK_OPTS_NS::STAGE ( load_a8  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2809 of file SkRasterPipeline_opts.h.

2809 {
2810 auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
2811
2812 r = g = b = F0;
2813 a = from_byte(load<U8>(ptr));
2814}

◆ STAGE() [97/188]

SK_OPTS_NS::STAGE ( load_a8_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2815 of file SkRasterPipeline_opts.h.

2815 {
2816 auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
2817
2818 dr = dg = db = F0;
2819 da = from_byte(load<U8>(ptr));
2820}

◆ STAGE() [98/188]

SK_OPTS_NS::STAGE ( load_af16  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3149 of file SkRasterPipeline_opts.h.

3149 {
3150 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
3151
3152 U16 A = load<U16>((const uint16_t*)ptr);
3153 r = F0;
3154 g = F0;
3155 b = F0;
3156 a = from_half(A);
3157}

◆ STAGE() [99/188]

SK_OPTS_NS::STAGE ( load_af16_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3158 of file SkRasterPipeline_opts.h.

3158 {
3159 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
3160
3161 U16 A = load<U16>((const uint16_t*)ptr);
3162 dr = dg = db = F0;
3163 da = from_half(A);
3164}

◆ STAGE() [100/188]

SK_OPTS_NS::STAGE ( load_dst  ,
const float *  ptr 
)

Definition at line 2145 of file SkRasterPipeline_opts.h.

2145 {
2146 dr = sk_unaligned_load<F>(ptr + 0*N);
2147 dg = sk_unaligned_load<F>(ptr + 1*N);
2148 db = sk_unaligned_load<F>(ptr + 2*N);
2149 da = sk_unaligned_load<F>(ptr + 3*N);
2150}

◆ STAGE() [101/188]

SK_OPTS_NS::STAGE ( load_f16  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3109 of file SkRasterPipeline_opts.h.

3109 {
3110 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx,dy);
3111
3112 U16 R,G,B,A;
3113 load4((const uint16_t*)ptr, &R,&G,&B,&A);
3114 r = from_half(R);
3115 g = from_half(G);
3116 b = from_half(B);
3117 a = from_half(A);
3118}

◆ STAGE() [102/188]

SK_OPTS_NS::STAGE ( load_f16_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3119 of file SkRasterPipeline_opts.h.

3119 {
3120 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx,dy);
3121
3122 U16 R,G,B,A;
3123 load4((const uint16_t*)ptr, &R,&G,&B,&A);
3124 dr = from_half(R);
3125 dg = from_half(G);
3126 db = from_half(B);
3127 da = from_half(A);
3128}

◆ STAGE() [103/188]

SK_OPTS_NS::STAGE ( load_f32  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3214 of file SkRasterPipeline_opts.h.

3214 {
3215 auto ptr = ptr_at_xy<const float>(ctx, 4*dx,4*dy);
3216 load4(ptr, &r,&g,&b,&a);
3217}

◆ STAGE() [104/188]

SK_OPTS_NS::STAGE ( load_f32_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3218 of file SkRasterPipeline_opts.h.

3218 {
3219 auto ptr = ptr_at_xy<const float>(ctx, 4*dx,4*dy);
3220 load4(ptr, &dr,&dg,&db,&da);
3221}

◆ STAGE() [105/188]

SK_OPTS_NS::STAGE ( load_rg1616  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2960 of file SkRasterPipeline_opts.h.

2960 {
2961 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
2962 b = F0;
2963 a = F1;
2964 from_1616(load<U32>(ptr), &r,&g);
2965}

◆ STAGE() [106/188]

SK_OPTS_NS::STAGE ( load_rg1616_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2966 of file SkRasterPipeline_opts.h.

2966 {
2967 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
2968 from_1616(load<U32>(ptr), &dr, &dg);
2969 db = F0;
2970 da = F1;
2971}

◆ STAGE() [107/188]

SK_OPTS_NS::STAGE ( load_rg88  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2912 of file SkRasterPipeline_opts.h.

2912 {
2913 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
2914 from_88(load<U16>(ptr), &r, &g);
2915 b = F0;
2916 a = F1;
2917}

◆ STAGE() [108/188]

SK_OPTS_NS::STAGE ( load_rg88_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2918 of file SkRasterPipeline_opts.h.

2918 {
2919 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
2920 from_88(load<U16>(ptr), &dr, &dg);
2921 db = F0;
2922 da = F1;
2923}

◆ STAGE() [109/188]

SK_OPTS_NS::STAGE ( load_rgf16  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3176 of file SkRasterPipeline_opts.h.

3176 {
3177 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
3178
3179 U16 R,G;
3180 load2((const uint16_t*)ptr, &R, &G);
3181 r = from_half(R);
3182 g = from_half(G);
3183 b = F0;
3184 a = F1;
3185}

◆ STAGE() [110/188]

SK_OPTS_NS::STAGE ( load_rgf16_dst  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3186 of file SkRasterPipeline_opts.h.

3186 {
3187 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
3188
3189 U16 R,G;
3190 load2((const uint16_t*)ptr, &R, &G);
3191 dr = from_half(R);
3192 dg = from_half(G);
3193 db = F0;
3194 da = F1;
3195}

◆ STAGE() [111/188]

SK_OPTS_NS::STAGE ( load_src  ,
const float *  ptr 
)

Definition at line 2115 of file SkRasterPipeline_opts.h.

2115 {
2116 r = sk_unaligned_load<F>(ptr + 0*N);
2117 g = sk_unaligned_load<F>(ptr + 1*N);
2118 b = sk_unaligned_load<F>(ptr + 2*N);
2119 a = sk_unaligned_load<F>(ptr + 3*N);
2120}

◆ STAGE() [112/188]

SK_OPTS_NS::STAGE ( load_src_rg  ,
float *  ptr 
)

Definition at line 2135 of file SkRasterPipeline_opts.h.

2135 {
2136 r = sk_unaligned_load<F>(ptr + 0*N);
2137 g = sk_unaligned_load<F>(ptr + 1*N);
2138}

◆ STAGE() [113/188]

SK_OPTS_NS::STAGE ( luminosity  ,
NoCtx   
)

Definition at line 2332 of file SkRasterPipeline_opts.h.

2332 {
2333 F R = dr*a,
2334 G = dg*a,
2335 B = db*a;
2336
2337 set_lum(&R, &G, &B, lum(r,g,b)*da);
2338 clip_color(&R,&G,&B, a*da);
2339
2340 r = mad(r, inv(da), mad(dr, inv(a), R));
2341 g = mad(g, inv(da), mad(dg, inv(a), G));
2342 b = mad(b, inv(da), mad(db, inv(a), B));
2343 a = a + nmad(a, da, da);
2344}

◆ STAGE() [114/188]

SK_OPTS_NS::STAGE ( mask_2pt_conical_degenerates  ,
SkRasterPipeline_2PtConicalCtx c 
)

Definition at line 3549 of file SkRasterPipeline_opts.h.

3549 {
3550 F& t = r;
3551 auto is_degenerate = (t <= 0) | (t != t);
3552 t = if_then_else(is_degenerate, F0, t);
3553 sk_unaligned_store(c->fMask, cond_to_mask(!is_degenerate));
3554}
static bool is_degenerate(const SkPath &path)
Definition: SkPath.cpp:73
uint32_t fMask[SkRasterPipeline_kMaxStride_highp]

◆ STAGE() [115/188]

SK_OPTS_NS::STAGE ( mask_2pt_conical_nan  ,
SkRasterPipeline_2PtConicalCtx c 
)

Definition at line 3542 of file SkRasterPipeline_opts.h.

3542 {
3543 F& t = r;
3544 auto is_degenerate = (t != t); // NaN
3545 t = if_then_else(is_degenerate, F0, t);
3546 sk_unaligned_store(c->fMask, cond_to_mask(!is_degenerate));
3547}

◆ STAGE() [116/188]

SK_OPTS_NS::STAGE ( matrix_2x3  ,
const float *  m 
)

Definition at line 3340 of file SkRasterPipeline_opts.h.

3340 {
3341 auto R = mad(r,m[0], mad(g,m[1], m[2])),
3342 G = mad(r,m[3], mad(g,m[4], m[5]));
3343 r = R;
3344 g = G;
3345}

◆ STAGE() [117/188]

SK_OPTS_NS::STAGE ( matrix_3x3  ,
const float *  m 
)

Definition at line 3346 of file SkRasterPipeline_opts.h.

3346 {
3347 auto R = mad(r,m[0], mad(g,m[3], b*m[6])),
3348 G = mad(r,m[1], mad(g,m[4], b*m[7])),
3349 B = mad(r,m[2], mad(g,m[5], b*m[8]));
3350 r = R;
3351 g = G;
3352 b = B;
3353}

◆ STAGE() [118/188]

SK_OPTS_NS::STAGE ( matrix_3x4  ,
const float *  m 
)

Definition at line 3354 of file SkRasterPipeline_opts.h.

3354 {
3355 auto R = mad(r,m[0], mad(g,m[3], mad(b,m[6], m[ 9]))),
3356 G = mad(r,m[1], mad(g,m[4], mad(b,m[7], m[10]))),
3357 B = mad(r,m[2], mad(g,m[5], mad(b,m[8], m[11])));
3358 r = R;
3359 g = G;
3360 b = B;
3361}

◆ STAGE() [119/188]

SK_OPTS_NS::STAGE ( matrix_4x3  ,
const float *  m 
)

Definition at line 3372 of file SkRasterPipeline_opts.h.

3372 {
3373 auto X = r,
3374 Y = g;
3375
3376 r = mad(X, m[0], mad(Y, m[4], m[ 8]));
3377 g = mad(X, m[1], mad(Y, m[5], m[ 9]));
3378 b = mad(X, m[2], mad(Y, m[6], m[10]));
3379 a = mad(X, m[3], mad(Y, m[7], m[11]));
3380}

◆ STAGE() [120/188]

SK_OPTS_NS::STAGE ( matrix_4x5  ,
const float *  m 
)

Definition at line 3362 of file SkRasterPipeline_opts.h.

3362 {
3363 auto R = mad(r,m[ 0], mad(g,m[ 1], mad(b,m[ 2], mad(a,m[ 3], m[ 4])))),
3364 G = mad(r,m[ 5], mad(g,m[ 6], mad(b,m[ 7], mad(a,m[ 8], m[ 9])))),
3365 B = mad(r,m[10], mad(g,m[11], mad(b,m[12], mad(a,m[13], m[14])))),
3366 A = mad(r,m[15], mad(g,m[16], mad(b,m[17], mad(a,m[18], m[19]))));
3367 r = R;
3368 g = G;
3369 b = B;
3370 a = A;
3371}

◆ STAGE() [121/188]

SK_OPTS_NS::STAGE ( matrix_perspective  ,
const float *  m 
)

Definition at line 3381 of file SkRasterPipeline_opts.h.

3381 {
3382 // N.B. Unlike the other matrix_ stages, this matrix is row-major.
3383 auto R = mad(r,m[0], mad(g,m[1], m[2])),
3384 G = mad(r,m[3], mad(g,m[4], m[5])),
3385 Z = mad(r,m[6], mad(g,m[7], m[8]));
3386 r = R * rcp_precise(Z);
3387 g = G * rcp_precise(Z);
3388}
#define Z

◆ STAGE() [122/188]

SK_OPTS_NS::STAGE ( matrix_scale_translate  ,
const float *  m 
)

Definition at line 3336 of file SkRasterPipeline_opts.h.

3336 {
3337 r = mad(r,m[0], m[2]);
3338 g = mad(g,m[1], m[3]);
3339}

◆ STAGE() [123/188]

SK_OPTS_NS::STAGE ( matrix_translate  ,
const float *  m 
)

Definition at line 3332 of file SkRasterPipeline_opts.h.

3332 {
3333 r += m[0];
3334 g += m[1];
3335}

◆ STAGE() [124/188]

SK_OPTS_NS::STAGE ( mipmap_linear_finish  ,
SkRasterPipeline_MipmapCtx ctx 
)

Definition at line 3824 of file SkRasterPipeline_opts.h.

3824 {
3825 r = lerp(sk_unaligned_load<F>(ctx->r), r, F_(ctx->lowerWeight));
3826 g = lerp(sk_unaligned_load<F>(ctx->g), g, F_(ctx->lowerWeight));
3827 b = lerp(sk_unaligned_load<F>(ctx->b), b, F_(ctx->lowerWeight));
3828 a = lerp(sk_unaligned_load<F>(ctx->a), a, F_(ctx->lowerWeight));
3829}
float g[SkRasterPipeline_kMaxStride_highp]
float r[SkRasterPipeline_kMaxStride_highp]
float b[SkRasterPipeline_kMaxStride_highp]
float a[SkRasterPipeline_kMaxStride_highp]

◆ STAGE() [125/188]

SK_OPTS_NS::STAGE ( mipmap_linear_init  ,
SkRasterPipeline_MipmapCtx ctx 
)

Definition at line 3809 of file SkRasterPipeline_opts.h.

3809 {
3810 sk_unaligned_store(ctx->x, r);
3811 sk_unaligned_store(ctx->y, g);
3812}
float x[SkRasterPipeline_kMaxStride_highp]
float y[SkRasterPipeline_kMaxStride_highp]

◆ STAGE() [126/188]

SK_OPTS_NS::STAGE ( mipmap_linear_update  ,
SkRasterPipeline_MipmapCtx ctx 
)

Definition at line 3814 of file SkRasterPipeline_opts.h.

3814 {
3815 sk_unaligned_store(ctx->r, r);
3816 sk_unaligned_store(ctx->g, g);
3817 sk_unaligned_store(ctx->b, b);
3818 sk_unaligned_store(ctx->a, a);
3819
3820 r = sk_unaligned_load<F>(ctx->x) * ctx->scaleX;
3821 g = sk_unaligned_load<F>(ctx->y) * ctx->scaleY;
3822}

◆ STAGE() [127/188]

SK_OPTS_NS::STAGE ( mirror_x  ,
const SkRasterPipeline_TileCtx ctx 
)

Definition at line 3260 of file SkRasterPipeline_opts.h.

3260{ r = exclusive_mirror(r, ctx); }
SI F exclusive_mirror(F v, const SkRasterPipeline_TileCtx *ctx)

◆ STAGE() [128/188]

SK_OPTS_NS::STAGE ( mirror_x_1  ,
NoCtx   
)

Definition at line 3265 of file SkRasterPipeline_opts.h.

3265{ r = clamp_01_(abs_( (r-1.0f) - two(floor_((r-1.0f)*0.5f)) - 1.0f )); }

◆ STAGE() [129/188]

SK_OPTS_NS::STAGE ( mirror_y  ,
const SkRasterPipeline_TileCtx ctx 
)

Definition at line 3261 of file SkRasterPipeline_opts.h.

3261{ g = exclusive_mirror(g, ctx); }

◆ STAGE() [130/188]

SK_OPTS_NS::STAGE ( move_dst_src  ,
NoCtx   
)

Definition at line 2416 of file SkRasterPipeline_opts.h.

2416 {
2417 r = dr;
2418 g = dg;
2419 b = db;
2420 a = da;
2421}

◆ STAGE() [131/188]

SK_OPTS_NS::STAGE ( move_src_dst  ,
NoCtx   
)

Definition at line 2410 of file SkRasterPipeline_opts.h.

2410 {
2411 dr = r;
2412 dg = g;
2413 db = b;
2414 da = a;
2415}

◆ STAGE() [132/188]

SK_OPTS_NS::STAGE ( negate_x  ,
NoCtx   
)

Definition at line 3505 of file SkRasterPipeline_opts.h.

3505{ r = -r; }

◆ STAGE() [133/188]

SK_OPTS_NS::STAGE ( parametric  ,
const skcms_TransferFunction ctx 
)

Definition at line 2729 of file SkRasterPipeline_opts.h.

2729 {
2730 auto fn = [&](F v) {
2731 U32 sign;
2732 v = strip_sign(v, &sign);
2733
2734 F r = if_then_else(v <= ctx->d, mad(ctx->c, v, ctx->f)
2735 , approx_powf(mad(ctx->a, v, ctx->b), ctx->g) + ctx->e);
2736 return apply_sign(r, sign);
2737 };
2738 r = fn(r);
2739 g = fn(g);
2740 b = fn(b);
2741}

◆ STAGE() [134/188]

SK_OPTS_NS::STAGE ( perlin_noise  ,
SkRasterPipeline_PerlinNoiseCtx ctx 
)

Definition at line 3710 of file SkRasterPipeline_opts.h.

3710 {
3711 F noiseVecX = (r + 0.5) * ctx->baseFrequencyX;
3712 F noiseVecY = (g + 0.5) * ctx->baseFrequencyY;
3713 r = g = b = a = F0;
3714 F stitchDataX = F_(ctx->stitchDataInX);
3715 F stitchDataY = F_(ctx->stitchDataInY);
3716 F ratio = F1;
3717
3718 for (int octave = 0; octave < ctx->numOctaves; ++octave) {
3719 // Calculate noise coordinates. (Roughly $noise_helper in Graphite)
3720 F floorValX = floor_(noiseVecX);
3721 F floorValY = floor_(noiseVecY);
3722 F ceilValX = floorValX + 1.0f;
3723 F ceilValY = floorValY + 1.0f;
3724 F fractValX = noiseVecX - floorValX;
3725 F fractValY = noiseVecY - floorValY;
3726
3727 if (ctx->stitching) {
3728 // If we are stitching, wrap the coordinates to the stitch position.
3729 floorValX -= sk_bit_cast<F>(cond_to_mask(floorValX >= stitchDataX) &
3730 sk_bit_cast<I32>(stitchDataX));
3731 floorValY -= sk_bit_cast<F>(cond_to_mask(floorValY >= stitchDataY) &
3732 sk_bit_cast<I32>(stitchDataY));
3733 ceilValX -= sk_bit_cast<F>(cond_to_mask(ceilValX >= stitchDataX) &
3734 sk_bit_cast<I32>(stitchDataX));
3735 ceilValY -= sk_bit_cast<F>(cond_to_mask(ceilValY >= stitchDataY) &
3736 sk_bit_cast<I32>(stitchDataY));
3737 }
3738
3739 U32 latticeLookup = (U32)(iround(floorValX)) & 0xFF;
3740 F latticeIdxX = cast(expand(gather(ctx->latticeSelector, latticeLookup)));
3741 latticeLookup = (U32)(iround(ceilValX)) & 0xFF;
3742 F latticeIdxY = cast(expand(gather(ctx->latticeSelector, latticeLookup)));
3743
3744 U32 b00 = (U32)(iround(latticeIdxX + floorValY)) & 0xFF;
3745 U32 b10 = (U32)(iround(latticeIdxY + floorValY)) & 0xFF;
3746 U32 b01 = (U32)(iround(latticeIdxX + ceilValY)) & 0xFF;
3747 U32 b11 = (U32)(iround(latticeIdxY + ceilValY)) & 0xFF;
3748
3749 // Calculate noise colors. (Roughly $noise_function in Graphite)
3750 // Apply Hermite interpolation to the fractional value.
3751 F smoothX = fractValX * fractValX * (3.0f - 2.0f * fractValX);
3752 F smoothY = fractValY * fractValY * (3.0f - 2.0f * fractValY);
3753
3754 F color[4];
3755 const uint32_t* channelNoiseData = reinterpret_cast<const uint32_t*>(ctx->noiseData);
3756 for (int channel = 0; channel < 4; ++channel) {
3757 U32 sample00 = gather(channelNoiseData, b00);
3758 U32 sample10 = gather(channelNoiseData, b10);
3759 U32 sample01 = gather(channelNoiseData, b01);
3760 U32 sample11 = gather(channelNoiseData, b11);
3761 channelNoiseData += 256;
3762
3763 F u = compute_perlin_vector(sample00, fractValX, fractValY);
3764 F v = compute_perlin_vector(sample10, fractValX - 1.0f, fractValY);
3765 F A = lerp(u, v, smoothX);
3766
3767 u = compute_perlin_vector(sample01, fractValX, fractValY - 1.0f);
3768 v = compute_perlin_vector(sample11, fractValX - 1.0f, fractValY - 1.0f);
3769 F B = lerp(u, v, smoothX);
3770
3771 color[channel] = lerp(A, B, smoothY);
3772 }
3773
3774 if (ctx->noiseType == SkPerlinNoiseShaderType::kTurbulence) {
3775 // For kTurbulence the result is: abs(noise[-1,1])
3776 color[0] = abs_(color[0]);
3777 color[1] = abs_(color[1]);
3778 color[2] = abs_(color[2]);
3779 color[3] = abs_(color[3]);
3780 }
3781
3782 r = mad(color[0], ratio, r);
3783 g = mad(color[1], ratio, g);
3784 b = mad(color[2], ratio, b);
3785 a = mad(color[3], ratio, a);
3786
3787 // Scale inputs for the next round.
3788 noiseVecX *= 2.0f;
3789 noiseVecY *= 2.0f;
3790 stitchDataX *= 2.0f;
3791 stitchDataY *= 2.0f;
3792 ratio *= 0.5f;
3793 }
3794
3795 if (ctx->noiseType == SkPerlinNoiseShaderType::kFractalNoise) {
3796 // For kFractalNoise the result is: noise[-1,1] * 0.5 + 0.5
3797 r = mad(r, 0.5f, 0.5f);
3798 g = mad(g, 0.5f, 0.5f);
3799 b = mad(b, 0.5f, 0.5f);
3800 a = mad(a, 0.5f, 0.5f);
3801 }
3802
3803 r = clamp_01_(r) * a;
3804 g = clamp_01_(g) * a;
3805 b = clamp_01_(b) * a;
3806 a = clamp_01_(a);
3807}
SI I32 iround(F v)
SI F compute_perlin_vector(U32 sample, F x, F y)

◆ STAGE() [135/188]

SK_OPTS_NS::STAGE ( PQish  ,
const skcms_TransferFunction ctx 
)

Definition at line 2754 of file SkRasterPipeline_opts.h.

2754 {
2755 auto fn = [&](F v) {
2756 U32 sign;
2757 v = strip_sign(v, &sign);
2758
2759 F r = approx_powf(max(mad(ctx->b, approx_powf(v, ctx->c), ctx->a), 0.0f)
2760 / (mad(ctx->e, approx_powf(v, ctx->c), ctx->d)),
2761 ctx->f);
2762
2763 return apply_sign(r, sign);
2764 };
2765 r = fn(r);
2766 g = fn(g);
2767 b = fn(b);
2768}

◆ STAGE() [136/188]

SK_OPTS_NS::STAGE ( premul  ,
NoCtx   
)

Definition at line 2429 of file SkRasterPipeline_opts.h.

2429 {
2430 r = r * a;
2431 g = g * a;
2432 b = b * a;
2433}

◆ STAGE() [137/188]

SK_OPTS_NS::STAGE ( premul_dst  ,
NoCtx   
)

Definition at line 2434 of file SkRasterPipeline_opts.h.

2434 {
2435 dr = dr * da;
2436 dg = dg * da;
2437 db = db * da;
2438}

◆ STAGE() [138/188]

SK_OPTS_NS::STAGE ( repeat_x  ,
const SkRasterPipeline_TileCtx ctx 
)

Definition at line 3258 of file SkRasterPipeline_opts.h.

3258{ r = exclusive_repeat(r, ctx); }
SI F exclusive_repeat(F v, const SkRasterPipeline_TileCtx *ctx)

◆ STAGE() [139/188]

SK_OPTS_NS::STAGE ( repeat_x_1  ,
NoCtx   
)

Definition at line 3264 of file SkRasterPipeline_opts.h.

3264{ r = clamp_01_(r - floor_(r)); }

◆ STAGE() [140/188]

SK_OPTS_NS::STAGE ( repeat_y  ,
const SkRasterPipeline_TileCtx ctx 
)

Definition at line 3259 of file SkRasterPipeline_opts.h.

3259{ g = exclusive_repeat(g, ctx); }

◆ STAGE() [141/188]

SK_OPTS_NS::STAGE ( rgb_to_hsl  ,
NoCtx   
)

Definition at line 2456 of file SkRasterPipeline_opts.h.

2456 {
2457 F mx = max(r, max(g,b)),
2458 mn = min(r, min(g,b)),
2459 d = mx - mn,
2460 d_rcp = 1.0f / d;
2461
2462 F h = (1/6.0f) *
2463 if_then_else(mx == mn, 0.0f,
2464 if_then_else(mx == r, (g-b)*d_rcp + if_then_else(g < b, 6.0f, 0.0f),
2465 if_then_else(mx == g, (b-r)*d_rcp + 2.0f,
2466 (r-g)*d_rcp + 4.0f)));
2467
2468 F l = (mx + mn) * 0.5f;
2469 F s = if_then_else(mx == mn, 0.0f,
2470 d / if_then_else(l > 0.5f, 2.0f-mx-mn, mx+mn));
2471
2472 r = h;
2473 g = s;
2474 b = l;
2475}

◆ STAGE() [142/188]

SK_OPTS_NS::STAGE ( saturation  ,
NoCtx   
)

Definition at line 2305 of file SkRasterPipeline_opts.h.

2305 {
2306 F R = dr*a,
2307 G = dg*a,
2308 B = db*a;
2309
2310 set_sat(&R, &G, &B, sat( r, g, b)*da);
2311 set_lum(&R, &G, &B, lum(dr,dg,db)* a); // (This is not redundant.)
2312 clip_color(&R,&G,&B, a*da);
2313
2314 r = mad(r, inv(da), mad(dr, inv(a), R));
2315 g = mad(g, inv(da), mad(dg, inv(a), G));
2316 b = mad(b, inv(da), mad(db, inv(a), B));
2317 a = a + nmad(a, da, da);
2318}

◆ STAGE() [143/188]

SK_OPTS_NS::STAGE ( scale_1_float  ,
const float *  c 
)

Definition at line 2620 of file SkRasterPipeline_opts.h.

2620 {
2621 r = r * *c;
2622 g = g * *c;
2623 b = b * *c;
2624 a = a * *c;
2625}

◆ STAGE() [144/188]

SK_OPTS_NS::STAGE ( scale_565  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2637 of file SkRasterPipeline_opts.h.

2637 {
2638 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2639
2640 F cr,cg,cb;
2641 from_565(load<U16>(ptr), &cr, &cg, &cb);
2642
2643 F ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
2644
2645 r = r * cr;
2646 g = g * cg;
2647 b = b * cb;
2648 a = a * ca;
2649}

◆ STAGE() [145/188]

SK_OPTS_NS::STAGE ( scale_native  ,
const float  scales[] 
)

Definition at line 2661 of file SkRasterPipeline_opts.h.

2661 {
2662 auto c = sk_unaligned_load<F>(scales);
2663 r = r * c;
2664 g = g * c;
2665 b = b * c;
2666 a = a * c;
2667}

◆ STAGE() [146/188]

SK_OPTS_NS::STAGE ( scale_u8  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2626 of file SkRasterPipeline_opts.h.

2626 {
2627 auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
2628
2629 auto scales = load<U8>(ptr);
2630 auto c = from_byte(scales);
2631
2632 r = r * c;
2633 g = g * c;
2634 b = b * c;
2635 a = a * c;
2636}

◆ STAGE() [147/188]

SK_OPTS_NS::STAGE ( seed_shader  ,
NoCtx   
)

Definition at line 2033 of file SkRasterPipeline_opts.h.

2033 {
2034 static constexpr float iota[] = {
2035 0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f,
2036 8.5f, 9.5f,10.5f,11.5f,12.5f,13.5f,14.5f,15.5f,
2037 };
2038 static_assert(std::size(iota) >= SkRasterPipeline_kMaxStride_highp);
2039
2040 // It's important for speed to explicitly cast(dx) and cast(dy),
2041 // which has the effect of splatting them to vectors before converting to floats.
2042 // On Intel this breaks a data dependency on previous loop iterations' registers.
2043 r = cast(U32_(dx)) + sk_unaligned_load<F>(iota);
2044 g = cast(U32_(dy)) + 0.5f;
2045 b = F1; // This is w=1 for matrix multiplies by the device coords.
2046 a = F0;
2047}

◆ STAGE() [148/188]

SK_OPTS_NS::STAGE ( set_rgb  ,
const float *  rgb 
)

Definition at line 2387 of file SkRasterPipeline_opts.h.

2387 {
2388 r = F_(rgb[0]);
2389 g = F_(rgb[1]);
2390 b = F_(rgb[2]);
2391}

◆ STAGE() [149/188]

SK_OPTS_NS::STAGE ( srcover_rgba_8888  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2346 of file SkRasterPipeline_opts.h.

2346 {
2347 auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
2348
2349 U32 dst = load<U32>(ptr);
2350 dr = cast((dst ) & 0xff);
2351 dg = cast((dst >> 8) & 0xff);
2352 db = cast((dst >> 16) & 0xff);
2353 da = cast((dst >> 24) );
2354 // {dr,dg,db,da} are in [0,255]
2355 // { r, g, b, a} are in [0, 1] (but may be out of gamut)
2356
2357 r = mad(dr, inv(a), r*255.0f);
2358 g = mad(dg, inv(a), g*255.0f);
2359 b = mad(db, inv(a), b*255.0f);
2360 a = mad(da, inv(a), a*255.0f);
2361 // { r, g, b, a} are now in [0,255] (but may be out of gamut)
2362
2363 // to_unorm() clamps back to gamut. Scaling by 1 since we're already 255-biased.
2364 dst = to_unorm(r, 1, 255)
2365 | to_unorm(g, 1, 255) << 8
2366 | to_unorm(b, 1, 255) << 16
2367 | to_unorm(a, 1, 255) << 24;
2368 store(ptr, dst);
2369}
SI void store(P *ptr, const T &val)

◆ STAGE() [150/188]

SK_OPTS_NS::STAGE ( store_10101010_xr  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3075 of file SkRasterPipeline_opts.h.

3075 {
3076 static constexpr float min = -0.752941f;
3077 static constexpr float max = 1.25098f;
3078 static constexpr float range = max - min;
3079 auto ptr = ptr_at_xy<uint16_t>(ctx, 4*dx,4*dy);
3080
3081 U16 R = pack(to_unorm((r - min) / range, 1023)) << 6,
3082 G = pack(to_unorm((g - min) / range, 1023)) << 6,
3083 B = pack(to_unorm((b - min) / range, 1023)) << 6,
3084 A = pack(to_unorm((a - min) / range, 1023)) << 6;
3085
3086 store4(ptr, R,G,B,A);
3087}
static uint32_t pack(SkFixed f, unsigned max, SkFixed one)

◆ STAGE() [151/188]

SK_OPTS_NS::STAGE ( store_1010102  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3088 of file SkRasterPipeline_opts.h.

3088 {
3089 auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
3090
3091 U32 px = to_unorm(r, 1023)
3092 | to_unorm(g, 1023) << 10
3093 | to_unorm(b, 1023) << 20
3094 | to_unorm(a, 3) << 30;
3095 store(ptr, px);
3096}

◆ STAGE() [152/188]

SK_OPTS_NS::STAGE ( store_1010102_xr  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3097 of file SkRasterPipeline_opts.h.

3097 {
3098 auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
3099 static constexpr float min = -0.752941f;
3100 static constexpr float max = 1.25098f;
3101 static constexpr float range = max - min;
3102 U32 px = to_unorm((r - min) / range, 1023)
3103 | to_unorm((g - min) / range, 1023) << 10
3104 | to_unorm((b - min) / range, 1023) << 20
3105 | to_unorm(a, 3) << 30;
3106 store(ptr, px);
3107}

◆ STAGE() [153/188]

SK_OPTS_NS::STAGE ( store_10x6  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3024 of file SkRasterPipeline_opts.h.

3024 {
3025 auto ptr = ptr_at_xy<uint16_t>(ctx, 4*dx,4*dy);
3026
3027 U16 R = pack(to_unorm(r, 1023)) << 6,
3028 G = pack(to_unorm(g, 1023)) << 6,
3029 B = pack(to_unorm(b, 1023)) << 6,
3030 A = pack(to_unorm(a, 1023)) << 6;
3031
3032 store4(ptr, R,G,B,A);
3033}

◆ STAGE() [154/188]

SK_OPTS_NS::STAGE ( store_16161616  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3000 of file SkRasterPipeline_opts.h.

3000 {
3001 auto ptr = ptr_at_xy<uint16_t>(ctx, 4*dx,4*dy);
3002
3003 U16 R = pack(to_unorm(r, 65535)),
3004 G = pack(to_unorm(g, 65535)),
3005 B = pack(to_unorm(b, 65535)),
3006 A = pack(to_unorm(a, 65535));
3007
3008 store4(ptr, R,G,B,A);
3009}

◆ STAGE() [155/188]

SK_OPTS_NS::STAGE ( store_4444  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2880 of file SkRasterPipeline_opts.h.

2880 {
2881 auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
2882 U16 px = pack( to_unorm(r, 15) << 12
2883 | to_unorm(g, 15) << 8
2884 | to_unorm(b, 15) << 4
2885 | to_unorm(a, 15) );
2886 store(ptr, px);
2887}

◆ STAGE() [156/188]

SK_OPTS_NS::STAGE ( store_565  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2858 of file SkRasterPipeline_opts.h.

2858 {
2859 auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
2860
2861 U16 px = pack( to_unorm(r, 31) << 11
2862 | to_unorm(g, 63) << 5
2863 | to_unorm(b, 31) );
2864 store(ptr, px);
2865}

◆ STAGE() [157/188]

SK_OPTS_NS::STAGE ( store_8888  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2902 of file SkRasterPipeline_opts.h.

2902 {
2903 auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
2904
2905 U32 px = to_unorm(r, 255)
2906 | to_unorm(g, 255) << 8
2907 | to_unorm(b, 255) << 16
2908 | to_unorm(a, 255) << 24;
2909 store(ptr, px);
2910}

◆ STAGE() [158/188]

SK_OPTS_NS::STAGE ( store_a16  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2953 of file SkRasterPipeline_opts.h.

2953 {
2954 auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
2955
2956 U16 px = pack(to_unorm(a, 65535));
2957 store(ptr, px);
2958}

◆ STAGE() [159/188]

SK_OPTS_NS::STAGE ( store_a8  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2827 of file SkRasterPipeline_opts.h.

2827 {
2828 auto ptr = ptr_at_xy<uint8_t>(ctx, dx,dy);
2829
2830 U8 packed = pack(pack(to_unorm(a, 255)));
2831 store(ptr, packed);
2832}

◆ STAGE() [160/188]

SK_OPTS_NS::STAGE ( store_af16  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3171 of file SkRasterPipeline_opts.h.

3171 {
3172 auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
3173 store(ptr, to_half(a));
3174}

◆ STAGE() [161/188]

SK_OPTS_NS::STAGE ( store_dst  ,
float *  ptr 
)

Definition at line 2153 of file SkRasterPipeline_opts.h.

2153 {
2154 sk_unaligned_store(ptr + 0*N, dr);
2155 sk_unaligned_store(ptr + 1*N, dg);
2156 sk_unaligned_store(ptr + 2*N, db);
2157 sk_unaligned_store(ptr + 3*N, da);
2158}

◆ STAGE() [162/188]

SK_OPTS_NS::STAGE ( store_f16  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3141 of file SkRasterPipeline_opts.h.

3141 {
3142 auto ptr = ptr_at_xy<uint64_t>(ctx, dx,dy);
3143 store4((uint16_t*)ptr, to_half(r)
3144 , to_half(g)
3145 , to_half(b)
3146 , to_half(a));
3147}

◆ STAGE() [163/188]

SK_OPTS_NS::STAGE ( store_f32  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3230 of file SkRasterPipeline_opts.h.

3230 {
3231 auto ptr = ptr_at_xy<float>(ctx, 4*dx,4*dy);
3232 store4(ptr, r,g,b,a);
3233}

◆ STAGE() [164/188]

SK_OPTS_NS::STAGE ( store_r8  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2833 of file SkRasterPipeline_opts.h.

2833 {
2834 auto ptr = ptr_at_xy<uint8_t>(ctx, dx,dy);
2835
2836 U8 packed = pack(pack(to_unorm(r, 255)));
2837 store(ptr, packed);
2838}

◆ STAGE() [165/188]

SK_OPTS_NS::STAGE ( store_rg1616  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2979 of file SkRasterPipeline_opts.h.

2979 {
2980 auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
2981
2982 U32 px = to_unorm(r, 65535)
2983 | to_unorm(g, 65535) << 16;
2984 store(ptr, px);
2985}

◆ STAGE() [166/188]

SK_OPTS_NS::STAGE ( store_rg88  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 2931 of file SkRasterPipeline_opts.h.

2931 {
2932 auto ptr = ptr_at_xy<uint16_t>(ctx, dx, dy);
2933 U16 px = pack( to_unorm(r, 255) | to_unorm(g, 255) << 8 );
2934 store(ptr, px);
2935}

◆ STAGE() [167/188]

SK_OPTS_NS::STAGE ( store_rgf16  ,
const SkRasterPipeline_MemoryCtx ctx 
)

Definition at line 3208 of file SkRasterPipeline_opts.h.

3208 {
3209 auto ptr = ptr_at_xy<uint32_t>(ctx, dx, dy);
3210 store2((uint16_t*)ptr, to_half(r)
3211 , to_half(g));
3212}
SI void store2(uint16_t *ptr, U16 r, U16 g)

◆ STAGE() [168/188]

SK_OPTS_NS::STAGE ( store_src  ,
float *  ptr 
)

Definition at line 2123 of file SkRasterPipeline_opts.h.

2123 {
2124 sk_unaligned_store(ptr + 0*N, r);
2125 sk_unaligned_store(ptr + 1*N, g);
2126 sk_unaligned_store(ptr + 2*N, b);
2127 sk_unaligned_store(ptr + 3*N, a);
2128}

◆ STAGE() [169/188]

SK_OPTS_NS::STAGE ( store_src_a  ,
float *  ptr 
)

Definition at line 2140 of file SkRasterPipeline_opts.h.

2140 {
2141 sk_unaligned_store(ptr, a);
2142}

◆ STAGE() [170/188]

SK_OPTS_NS::STAGE ( store_src_rg  ,
float *  ptr 
)

Definition at line 2130 of file SkRasterPipeline_opts.h.

2130 {
2131 sk_unaligned_store(ptr + 0*N, r);
2132 sk_unaligned_store(ptr + 1*N, g);
2133}

◆ STAGE() [171/188]

SK_OPTS_NS::STAGE ( swap_rb  ,
NoCtx   
)

Definition at line 2399 of file SkRasterPipeline_opts.h.

2399 {
2400 auto tmp = r;
2401 r = b;
2402 b = tmp;
2403}

◆ STAGE() [172/188]

SK_OPTS_NS::STAGE ( swap_rb_dst  ,
NoCtx   
)

Definition at line 2404 of file SkRasterPipeline_opts.h.

2404 {
2405 auto tmp = dr;
2406 dr = db;
2407 db = tmp;
2408}

◆ STAGE() [173/188]

SK_OPTS_NS::STAGE ( swap_src_dst  ,
NoCtx   
)

Definition at line 2422 of file SkRasterPipeline_opts.h.

2422 {
2423 std::swap(r, dr);
2424 std::swap(g, dg);
2425 std::swap(b, db);
2426 std::swap(a, da);
2427}
void swap(sk_sp< T > &a, sk_sp< T > &b)
Definition: SkRefCnt.h:341

◆ STAGE() [174/188]

SK_OPTS_NS::STAGE ( swizzle  ,
void *  ctx 
)

Definition at line 5043 of file SkRasterPipeline_opts.h.

5043 {
5044 auto ir = r, ig = g, ib = b, ia = a;
5045 F* o[] = {&r, &g, &b, &a};
5046 char swiz[4];
5047 memcpy(swiz, &ctx, sizeof(swiz));
5048
5049 for (int i = 0; i < 4; ++i) {
5050 switch (swiz[i]) {
5051 case 'r': *o[i] = ir; break;
5052 case 'g': *o[i] = ig; break;
5053 case 'b': *o[i] = ib; break;
5054 case 'a': *o[i] = ia; break;
5055 case '0': *o[i] = F0; break;
5056 case '1': *o[i] = F1; break;
5057 default: break;
5058 }
5059 }
5060}

◆ STAGE() [175/188]

SK_OPTS_NS::STAGE ( unbounded_set_rgb  ,
const float *  rgb 
)

Definition at line 2393 of file SkRasterPipeline_opts.h.

2393 {
2394 r = F_(rgb[0]);
2395 g = F_(rgb[1]);
2396 b = F_(rgb[2]);
2397}

◆ STAGE() [176/188]

SK_OPTS_NS::STAGE ( unbounded_uniform_color  ,
const SkRasterPipeline_UniformColorCtx c 
)

◆ STAGE() [177/188]

SK_OPTS_NS::STAGE ( uniform_color  ,
const SkRasterPipeline_UniformColorCtx c 
)

Definition at line 2084 of file SkRasterPipeline_opts.h.

2084 {
2085 r = F_(c->r);
2086 g = F_(c->g);
2087 b = F_(c->b);
2088 a = F_(c->a);
2089}

◆ STAGE() [178/188]

SK_OPTS_NS::STAGE ( uniform_color_dst  ,
const SkRasterPipeline_UniformColorCtx c 
)

Definition at line 2097 of file SkRasterPipeline_opts.h.

2097 {
2098 dr = F_(c->r);
2099 dg = F_(c->g);
2100 db = F_(c->b);
2101 da = F_(c->a);
2102}

◆ STAGE() [179/188]

SK_OPTS_NS::STAGE ( unpremul  ,
NoCtx   
)

Definition at line 2439 of file SkRasterPipeline_opts.h.

2439 {
2440 float inf = sk_bit_cast<float>(0x7f800000);
2441 auto scale = if_then_else(1.0f/a < inf, 1.0f/a, 0.0f);
2442 r *= scale;
2443 g *= scale;
2444 b *= scale;
2445}

◆ STAGE() [180/188]

SK_OPTS_NS::STAGE ( unpremul_polar  ,
NoCtx   
)

Definition at line 2446 of file SkRasterPipeline_opts.h.

2446 {
2447 float inf = sk_bit_cast<float>(0x7f800000);
2448 auto scale = if_then_else(1.0f/a < inf, 1.0f/a, 0.0f);
2449 g *= scale;
2450 b *= scale;
2451}

◆ STAGE() [181/188]

SK_OPTS_NS::STAGE ( white_color  ,
NoCtx   
)

Definition at line 2110 of file SkRasterPipeline_opts.h.

2110 {
2111 r = g = b = a = F1;
2112}

◆ STAGE() [182/188]

SK_OPTS_NS::STAGE ( xy_to_2pt_conical_focal_on_circle  ,
NoCtx   
)

Definition at line 3512 of file SkRasterPipeline_opts.h.

3512 {
3513 F x = r, y = g, &t = r;
3514 t = x + y*y / x; // (x^2 + y^2) / x
3515}

◆ STAGE() [183/188]

SK_OPTS_NS::STAGE ( xy_to_2pt_conical_greater  ,
const SkRasterPipeline_2PtConicalCtx ctx 
)

Definition at line 3522 of file SkRasterPipeline_opts.h.

3522 {
3523 F x = r, y = g, &t = r;
3524 t = sqrt_(x*x - y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
3525}

◆ STAGE() [184/188]

SK_OPTS_NS::STAGE ( xy_to_2pt_conical_smaller  ,
const SkRasterPipeline_2PtConicalCtx ctx 
)

Definition at line 3527 of file SkRasterPipeline_opts.h.

3527 {
3528 F x = r, y = g, &t = r;
3529 t = -sqrt_(x*x - y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
3530}

◆ STAGE() [185/188]

SK_OPTS_NS::STAGE ( xy_to_2pt_conical_strip  ,
const SkRasterPipeline_2PtConicalCtx ctx 
)

Definition at line 3507 of file SkRasterPipeline_opts.h.

3507 {
3508 F x = r, y = g, &t = r;
3509 t = x + sqrt_(ctx->fP0 - y*y); // ctx->fP0 = r0 * r0
3510}

◆ STAGE() [186/188]

SK_OPTS_NS::STAGE ( xy_to_2pt_conical_well_behaved  ,
const SkRasterPipeline_2PtConicalCtx ctx 
)

Definition at line 3517 of file SkRasterPipeline_opts.h.

3517 {
3518 F x = r, y = g, &t = r;
3519 t = sqrt_(x*x + y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
3520}

◆ STAGE() [187/188]

SK_OPTS_NS::STAGE ( xy_to_radius  ,
NoCtx   
)

Definition at line 3497 of file SkRasterPipeline_opts.h.

3497 {
3498 F X2 = r * r,
3499 Y2 = g * g;
3500 r = sqrt_(X2 + Y2);
3501}

◆ STAGE() [188/188]

SK_OPTS_NS::STAGE ( xy_to_unit_angle  ,
NoCtx   
)

Definition at line 3471 of file SkRasterPipeline_opts.h.

3471 {
3472 F X = r,
3473 Y = g;
3474 F xabs = abs_(X),
3475 yabs = abs_(Y);
3476
3477 F slope = min(xabs, yabs)/max(xabs, yabs);
3478 F s = slope * slope;
3479
3480 // Use a 7th degree polynomial to approximate atan.
3481 // This was generated using sollya.gforge.inria.fr.
3482 // A float optimized polynomial was generated using the following command.
3483 // P1 = fpminimax((1/(2*Pi))*atan(x),[|1,3,5,7|],[|24...|],[2^(-40),1],relative);
3484 F phi = slope
3485 * (0.15912117063999176025390625f + s
3486 * (-5.185396969318389892578125e-2f + s
3487 * (2.476101927459239959716796875e-2f + s
3488 * (-7.0547382347285747528076171875e-3f))));
3489
3490 phi = if_then_else(xabs < yabs, 1.0f/4.0f - phi, phi);
3491 phi = if_then_else(X < 0.0f , 1.0f/2.0f - phi, phi);
3492 phi = if_then_else(Y < 0.0f , 1.0f - phi , phi);
3493 phi = if_then_else(phi != phi , 0.0f , phi); // Check for NaN.
3494 r = phi;
3495}

◆ STAGE_BRANCH() [1/5]

SK_OPTS_NS::STAGE_BRANCH ( branch_if_all_lanes_active  ,
SkRasterPipeline_BranchIfAllLanesActiveCtx ctx 
)

Definition at line 3979 of file SkRasterPipeline_opts.h.

3979 {
3980 uint32_t iota[] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
3981 static_assert(std::size(iota) >= SkRasterPipeline_kMaxStride_highp);
3982
3983 I32 tailLanes = cond_to_mask(*ctx->tail <= sk_unaligned_load<U32>(iota));
3984 return all(execution_mask() | tailLanes) ? ctx->offset : 1;
3985}
#define execution_mask()
V< int32_t > I32
Definition: Transform_inl.h:15
SI bool all(I32 c)

◆ STAGE_BRANCH() [2/5]

SK_OPTS_NS::STAGE_BRANCH ( branch_if_any_lanes_active  ,
SkRasterPipeline_BranchCtx ctx 
)

Definition at line 3987 of file SkRasterPipeline_opts.h.

3987 {
3988 return any(execution_mask()) ? ctx->offset : 1;
3989}
SI bool any(I32 c)

◆ STAGE_BRANCH() [3/5]

SK_OPTS_NS::STAGE_BRANCH ( branch_if_no_active_lanes_eq  ,
SkRasterPipeline_BranchIfEqualCtx ctx 
)

Definition at line 3999 of file SkRasterPipeline_opts.h.

3999 {
4000 // Compare each lane against the expected value...
4001 I32 match = cond_to_mask(*(const I32*)ctx->ptr == ctx->value);
4002 // ... but mask off lanes that aren't executing.
4003 match &= execution_mask();
4004 // If any lanes matched, don't take the branch.
4005 return any(match) ? 1 : ctx->offset;
4006}
def match(bench, filt)
Definition: benchmark.py:23

◆ STAGE_BRANCH() [4/5]

SK_OPTS_NS::STAGE_BRANCH ( branch_if_no_lanes_active  ,
SkRasterPipeline_BranchCtx ctx 
)

Definition at line 3991 of file SkRasterPipeline_opts.h.

3991 {
3992 return any(execution_mask()) ? 1 : ctx->offset;
3993}

◆ STAGE_BRANCH() [5/5]

SK_OPTS_NS::STAGE_BRANCH ( jump  ,
SkRasterPipeline_BranchCtx ctx 
)

Definition at line 3995 of file SkRasterPipeline_opts.h.

3995 {
3996 return ctx->offset;
3997}

◆ STAGE_TAIL() [1/75]

SK_OPTS_NS::STAGE_TAIL ( acos_float  ,
F dst 
)

Definition at line 4449 of file SkRasterPipeline_opts.h.

4449{ *dst = acos_(*dst); }

◆ STAGE_TAIL() [2/75]

SK_OPTS_NS::STAGE_TAIL ( asin_float  ,
F dst 
)

Definition at line 4448 of file SkRasterPipeline_opts.h.

4448{ *dst = asin_(*dst); }

◆ STAGE_TAIL() [3/75]

SK_OPTS_NS::STAGE_TAIL ( atan_float  ,
F dst 
)

Definition at line 4450 of file SkRasterPipeline_opts.h.

4450{ *dst = atan_(*dst); }

◆ STAGE_TAIL() [4/75]

SK_OPTS_NS::STAGE_TAIL ( case_op  ,
SkRasterPipeline_CaseOpCtx packed 
)

Definition at line 3947 of file SkRasterPipeline_opts.h.

3947 {
3948 auto ctx = SkRPCtxUtils::Unpack(packed);
3949
3950 // Check each lane to see if the case value matches the expectation.
3951 I32* actualValue = (I32*)(base + ctx.offset);
3952 I32 caseMatches = cond_to_mask(*actualValue == ctx.expectedValue);
3953
3954 // In lanes where we found a match, enable the loop mask...
3955 g = sk_bit_cast<F>(sk_bit_cast<I32>(g) | caseMatches);
3957
3958 // ... and clear the default-case mask.
3959 I32* defaultMask = actualValue + 1;
3960 *defaultMask &= ~caseMatches;
3961}
#define update_execution_mask()

◆ STAGE_TAIL() [5/75]

SK_OPTS_NS::STAGE_TAIL ( continue_op  ,
I32 continueMask 
)

Definition at line 3938 of file SkRasterPipeline_opts.h.

3938 {
3939 // Set any currently-executing lanes in the continue-mask to true.
3940 *continueMask |= execution_mask();
3941
3942 // Disable any currently-executing lanes from the loop mask. (Just like `mask_off_loop_mask`.)
3943 g = sk_bit_cast<F>(sk_bit_cast<I32>(g) & ~execution_mask());
3945}

◆ STAGE_TAIL() [6/75]

SK_OPTS_NS::STAGE_TAIL ( copy_2_immutables_unmasked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4158 of file SkRasterPipeline_opts.h.

4158 {
4159 copy_n_immutable_unmasked_fn<2>(packed, base);
4160}

◆ STAGE_TAIL() [7/75]

SK_OPTS_NS::STAGE_TAIL ( copy_2_slots_masked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4183 of file SkRasterPipeline_opts.h.

4183 {
4184 copy_n_slots_masked_fn<2>(packed, base, execution_mask());
4185}

◆ STAGE_TAIL() [8/75]

SK_OPTS_NS::STAGE_TAIL ( copy_2_slots_unmasked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4128 of file SkRasterPipeline_opts.h.

4128 {
4129 copy_n_slots_unmasked_fn<2>(packed, base);
4130}

◆ STAGE_TAIL() [9/75]

SK_OPTS_NS::STAGE_TAIL ( copy_2_uniforms  ,
SkRasterPipeline_UniformCtx ctx 
)

Definition at line 4070 of file SkRasterPipeline_opts.h.

4070 {
4071 const int* src = ctx->src;
4072 I32* dst = (I32*)ctx->dst;
4073 dst[0] = I32_(src[0]);
4074 dst[1] = I32_(src[1]);
4075}

◆ STAGE_TAIL() [10/75]

SK_OPTS_NS::STAGE_TAIL ( copy_3_immutables_unmasked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4161 of file SkRasterPipeline_opts.h.

4161 {
4162 copy_n_immutable_unmasked_fn<3>(packed, base);
4163}

◆ STAGE_TAIL() [11/75]

SK_OPTS_NS::STAGE_TAIL ( copy_3_slots_masked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4186 of file SkRasterPipeline_opts.h.

4186 {
4187 copy_n_slots_masked_fn<3>(packed, base, execution_mask());
4188}

◆ STAGE_TAIL() [12/75]

SK_OPTS_NS::STAGE_TAIL ( copy_3_slots_unmasked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4131 of file SkRasterPipeline_opts.h.

4131 {
4132 copy_n_slots_unmasked_fn<3>(packed, base);
4133}

◆ STAGE_TAIL() [13/75]

SK_OPTS_NS::STAGE_TAIL ( copy_3_uniforms  ,
SkRasterPipeline_UniformCtx ctx 
)

Definition at line 4076 of file SkRasterPipeline_opts.h.

4076 {
4077 const int* src = ctx->src;
4078 I32* dst = (I32*)ctx->dst;
4079 dst[0] = I32_(src[0]);
4080 dst[1] = I32_(src[1]);
4081 dst[2] = I32_(src[2]);
4082}

◆ STAGE_TAIL() [14/75]

SK_OPTS_NS::STAGE_TAIL ( copy_4_immutables_unmasked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4164 of file SkRasterPipeline_opts.h.

4164 {
4165 copy_n_immutable_unmasked_fn<4>(packed, base);
4166}

◆ STAGE_TAIL() [15/75]

SK_OPTS_NS::STAGE_TAIL ( copy_4_slots_masked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4189 of file SkRasterPipeline_opts.h.

4189 {
4190 copy_n_slots_masked_fn<4>(packed, base, execution_mask());
4191}

◆ STAGE_TAIL() [16/75]

SK_OPTS_NS::STAGE_TAIL ( copy_4_slots_unmasked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4134 of file SkRasterPipeline_opts.h.

4134 {
4135 copy_n_slots_unmasked_fn<4>(packed, base);
4136}

◆ STAGE_TAIL() [17/75]

SK_OPTS_NS::STAGE_TAIL ( copy_4_uniforms  ,
SkRasterPipeline_UniformCtx ctx 
)

Definition at line 4083 of file SkRasterPipeline_opts.h.

4083 {
4084 const int* src = ctx->src;
4085 I32* dst = (I32*)ctx->dst;
4086 dst[0] = I32_(src[0]);
4087 dst[1] = I32_(src[1]);
4088 dst[2] = I32_(src[2]);
4089 dst[3] = I32_(src[3]);
4090}

◆ STAGE_TAIL() [18/75]

SK_OPTS_NS::STAGE_TAIL ( copy_constant  ,
SkRasterPipeline_ConstantCtx packed 
)

Definition at line 4092 of file SkRasterPipeline_opts.h.

4092 {
4093 auto ctx = SkRPCtxUtils::Unpack(packed);
4094 I32* dst = (I32*)(base + ctx.dst);
4095 I32 value = I32_(ctx.value);
4096 dst[0] = value;
4097}

◆ STAGE_TAIL() [19/75]

SK_OPTS_NS::STAGE_TAIL ( copy_from_indirect_uniform_unmasked  ,
SkRasterPipeline_CopyIndirectCtx ctx 
)

Definition at line 4295 of file SkRasterPipeline_opts.h.

4295 {
4296 // Clamp the indirect offsets to stay within the limit.
4297 U32 offsets = *(const U32*)ctx->indirectOffset;
4299
4300 // Use gather to perform indirect lookups; write the results into `dst`.
4301 const int* src = ctx->src;
4302 I32* dst = (I32*)ctx->dst;
4303 I32* end = dst + ctx->slots;
4304 do {
4305 *dst = gather(src, offsets);
4306 dst += 1;
4307 src += 1;
4308 } while (dst != end);
4309}

◆ STAGE_TAIL() [20/75]

SK_OPTS_NS::STAGE_TAIL ( copy_from_indirect_unmasked  ,
SkRasterPipeline_CopyIndirectCtx ctx 
)

Definition at line 4271 of file SkRasterPipeline_opts.h.

4271 {
4272 // Clamp the indirect offsets to stay within the limit.
4273 U32 offsets = *(const U32*)ctx->indirectOffset;
4275
4276 // Scale up the offsets to account for the N lanes per value.
4277 offsets *= N;
4278
4279 // Adjust the offsets forward so that they fetch from the correct lane.
4280 static constexpr uint32_t iota[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
4281 static_assert(std::size(iota) >= SkRasterPipeline_kMaxStride_highp);
4282 offsets += sk_unaligned_load<U32>(iota);
4283
4284 // Use gather to perform indirect lookups; write the results into `dst`.
4285 const int* src = ctx->src;
4286 I32* dst = (I32*)ctx->dst;
4287 I32* end = dst + ctx->slots;
4288 do {
4289 *dst = gather(src, offsets);
4290 dst += 1;
4291 src += N;
4292 } while (dst != end);
4293}

◆ STAGE_TAIL() [21/75]

SK_OPTS_NS::STAGE_TAIL ( copy_immutable_unmasked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4155 of file SkRasterPipeline_opts.h.

4155 {
4156 copy_n_immutable_unmasked_fn<1>(packed, base);
4157}

◆ STAGE_TAIL() [22/75]

SK_OPTS_NS::STAGE_TAIL ( copy_slot_masked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4180 of file SkRasterPipeline_opts.h.

4180 {
4181 copy_n_slots_masked_fn<1>(packed, base, execution_mask());
4182}

◆ STAGE_TAIL() [23/75]

SK_OPTS_NS::STAGE_TAIL ( copy_slot_unmasked  ,
SkRasterPipeline_BinaryOpCtx packed 
)

Definition at line 4125 of file SkRasterPipeline_opts.h.

4125 {
4126 copy_n_slots_unmasked_fn<1>(packed, base);
4127}

◆ STAGE_TAIL() [24/75]

SK_OPTS_NS::STAGE_TAIL ( copy_to_indirect_masked  ,
SkRasterPipeline_CopyIndirectCtx ctx 
)

Definition at line 4311 of file SkRasterPipeline_opts.h.

4311 {
4312 // Clamp the indirect offsets to stay within the limit.
4313 U32 offsets = *(const U32*)ctx->indirectOffset;
4315
4316 // Scale up the offsets to account for the N lanes per value.
4317 offsets *= N;
4318
4319 // Adjust the offsets forward so that they store into the correct lane.
4320 static constexpr uint32_t iota[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
4321 static_assert(std::size(iota) >= SkRasterPipeline_kMaxStride_highp);
4322 offsets += sk_unaligned_load<U32>(iota);
4323
4324 // Perform indirect, masked writes into `dst`.
4325 const I32* src = (const I32*)ctx->src;
4326 const I32* end = src + ctx->slots;
4327 int* dst = ctx->dst;
4328 I32 mask = execution_mask();
4329 do {
4330 scatter_masked(*src, dst, offsets, mask);
4331 dst += N;
4332 src += 1;
4333 } while (src != end);
4334}
SI void scatter_masked(I32 src, int *dst, U32 ix, I32 mask)

◆ STAGE_TAIL() [25/75]

SK_OPTS_NS::STAGE_TAIL ( copy_uniform  ,
SkRasterPipeline_UniformCtx ctx 
)

Definition at line 4065 of file SkRasterPipeline_opts.h.

4065 {
4066 const int* src = ctx->src;
4067 I32* dst = (I32*)ctx->dst;
4068 dst[0] = I32_(src[0]);
4069}

◆ STAGE_TAIL() [26/75]

SK_OPTS_NS::STAGE_TAIL ( cos_float  ,
F dst 
)

Definition at line 4446 of file SkRasterPipeline_opts.h.

4446{ *dst = cos_(*dst); }

◆ STAGE_TAIL() [27/75]

SK_OPTS_NS::STAGE_TAIL ( dot_3_floats  ,
F dst 
)

Definition at line 4769 of file SkRasterPipeline_opts.h.

4769 {
4770 dst[0] = mad(dst[0], dst[3],
4771 mad(dst[1], dst[4],
4772 dst[2] * dst[5]));
4773}

◆ STAGE_TAIL() [28/75]

SK_OPTS_NS::STAGE_TAIL ( dot_4_floats  ,
F dst 
)

Definition at line 4775 of file SkRasterPipeline_opts.h.

4775 {
4776 dst[0] = mad(dst[0], dst[4],
4777 mad(dst[1], dst[5],
4778 mad(dst[2], dst[6],
4779 dst[3] * dst[7])));
4780}

◆ STAGE_TAIL() [29/75]

SK_OPTS_NS::STAGE_TAIL ( exchange_src  ,
F rgba 
)

Definition at line 3875 of file SkRasterPipeline_opts.h.

3875 {
3876 // Swaps r,g,b,a registers with the values at `rgba`.
3877 F temp[4] = {r, g, b, a};
3878 r = rgba[0];
3879 rgba[0] = temp[0];
3880 g = rgba[1];
3881 rgba[1] = temp[1];
3882 b = rgba[2];
3883 rgba[2] = temp[2];
3884 a = rgba[3];
3885 rgba[3] = temp[3];
3886}

◆ STAGE_TAIL() [30/75]

SK_OPTS_NS::STAGE_TAIL ( exp2_float  ,
F dst 
)

Definition at line 4453 of file SkRasterPipeline_opts.h.

4453{ *dst = approx_pow2(*dst); }

◆ STAGE_TAIL() [31/75]

SK_OPTS_NS::STAGE_TAIL ( exp_float  ,
F dst 
)

Definition at line 4452 of file SkRasterPipeline_opts.h.

4452{ *dst = approx_exp(*dst); }

◆ STAGE_TAIL() [32/75]

SK_OPTS_NS::STAGE_TAIL ( init_lane_masks  ,
SkRasterPipeline_InitLaneMasksCtx ctx 
)

Definition at line 3852 of file SkRasterPipeline_opts.h.

3852 {
3853 uint32_t iota[] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
3854 static_assert(std::size(iota) >= SkRasterPipeline_kMaxStride_highp);
3855
3856 I32 mask = cond_to_mask(sk_unaligned_load<U32>(iota) < *ctx->tail);
3857 r = g = b = a = sk_bit_cast<F>(mask);
3858}

◆ STAGE_TAIL() [33/75]

SK_OPTS_NS::STAGE_TAIL ( inverse_mat2  ,
F dst 
)

Definition at line 4457 of file SkRasterPipeline_opts.h.

4457 {
4458 F a00 = dst[0], a01 = dst[1],
4459 a10 = dst[2], a11 = dst[3];
4460 F det = nmad(a01, a10, a00 * a11),
4461 invdet = rcp_precise(det);
4462 dst[0] = invdet * a11;
4463 dst[1] = -invdet * a01;
4464 dst[2] = -invdet * a10;
4465 dst[3] = invdet * a00;
4466}

◆ STAGE_TAIL() [34/75]

SK_OPTS_NS::STAGE_TAIL ( inverse_mat3  ,
F dst 
)

Definition at line 4468 of file SkRasterPipeline_opts.h.

4468 {
4469 F a00 = dst[0], a01 = dst[1], a02 = dst[2],
4470 a10 = dst[3], a11 = dst[4], a12 = dst[5],
4471 a20 = dst[6], a21 = dst[7], a22 = dst[8];
4472 F b01 = nmad(a12, a21, a22 * a11),
4473 b11 = nmad(a22, a10, a12 * a20),
4474 b21 = nmad(a11, a20, a21 * a10);
4475 F det = mad(a00, b01, mad(a01, b11, a02 * b21)),
4476 invdet = rcp_precise(det);
4477 dst[0] = invdet * b01;
4478 dst[1] = invdet * nmad(a22, a01, a02 * a21);
4479 dst[2] = invdet * nmad(a02, a11, a12 * a01);
4480 dst[3] = invdet * b11;
4481 dst[4] = invdet * nmad(a02, a20, a22 * a00);
4482 dst[5] = invdet * nmad(a12, a00, a02 * a10);
4483 dst[6] = invdet * b21;
4484 dst[7] = invdet * nmad(a21, a00, a01 * a20);
4485 dst[8] = invdet * nmad(a01, a10, a11 * a00);
4486}

◆ STAGE_TAIL() [35/75]

SK_OPTS_NS::STAGE_TAIL ( inverse_mat4  ,
F dst 
)

Definition at line 4488 of file SkRasterPipeline_opts.h.

4488 {
4489 F a00 = dst[0], a01 = dst[1], a02 = dst[2], a03 = dst[3],
4490 a10 = dst[4], a11 = dst[5], a12 = dst[6], a13 = dst[7],
4491 a20 = dst[8], a21 = dst[9], a22 = dst[10], a23 = dst[11],
4492 a30 = dst[12], a31 = dst[13], a32 = dst[14], a33 = dst[15];
4493 F b00 = nmad(a01, a10, a00 * a11),
4494 b01 = nmad(a02, a10, a00 * a12),
4495 b02 = nmad(a03, a10, a00 * a13),
4496 b03 = nmad(a02, a11, a01 * a12),
4497 b04 = nmad(a03, a11, a01 * a13),
4498 b05 = nmad(a03, a12, a02 * a13),
4499 b06 = nmad(a21, a30, a20 * a31),
4500 b07 = nmad(a22, a30, a20 * a32),
4501 b08 = nmad(a23, a30, a20 * a33),
4502 b09 = nmad(a22, a31, a21 * a32),
4503 b10 = nmad(a23, a31, a21 * a33),
4504 b11 = nmad(a23, a32, a22 * a33),
4505 det = mad(b00, b11, b05 * b06) + mad(b02, b09, b03 * b08) - mad(b01, b10, b04 * b07),
4506 invdet = rcp_precise(det);
4507 b00 *= invdet;
4508 b01 *= invdet;
4509 b02 *= invdet;
4510 b03 *= invdet;
4511 b04 *= invdet;
4512 b05 *= invdet;
4513 b06 *= invdet;
4514 b07 *= invdet;
4515 b08 *= invdet;
4516 b09 *= invdet;
4517 b10 *= invdet;
4518 b11 *= invdet;
4519 dst[0] = mad(a13, b09, nmad(a12, b10, a11*b11));
4520 dst[1] = nmad(a03, b09, nmad(a01, b11, a02*b10));
4521 dst[2] = mad(a33, b03, nmad(a32, b04, a31*b05));
4522 dst[3] = nmad(a23, b03, nmad(a21, b05, a22*b04));
4523 dst[4] = nmad(a13, b07, nmad(a10, b11, a12*b08));
4524 dst[5] = mad(a03, b07, nmad(a02, b08, a00*b11));
4525 dst[6] = nmad(a33, b01, nmad(a30, b05, a32*b02));
4526 dst[7] = mad(a23, b01, nmad(a22, b02, a20*b05));
4527 dst[8] = mad(a13, b06, nmad(a11, b08, a10*b10));
4528 dst[9] = nmad(a03, b06, nmad(a00, b10, a01*b08));
4529 dst[10] = mad(a33, b00, nmad(a31, b02, a30*b04));
4530 dst[11] = nmad(a23, b00, nmad(a20, b04, a21*b02));
4531 dst[12] = nmad(a12, b06, nmad(a10, b09, a11*b07));
4532 dst[13] = mad(a02, b06, nmad(a01, b07, a00*b09));
4533 dst[14] = nmad(a32, b00, nmad(a30, b03, a31*b01));
4534 dst[15] = mad(a22, b00, nmad(a21, b01, a20*b03));
4535}

◆ STAGE_TAIL() [36/75]

SK_OPTS_NS::STAGE_TAIL ( load_condition_mask  ,
F ctx 
)

Definition at line 3888 of file SkRasterPipeline_opts.h.

3888 {
3889 r = sk_unaligned_load<F>(ctx);
3891}

◆ STAGE_TAIL() [37/75]

SK_OPTS_NS::STAGE_TAIL ( load_loop_mask  ,
F ctx 
)

Definition at line 3909 of file SkRasterPipeline_opts.h.

3909 {
3910 g = sk_unaligned_load<F>(ctx);
3912}

◆ STAGE_TAIL() [38/75]

SK_OPTS_NS::STAGE_TAIL ( load_return_mask  ,
F ctx 
)

Definition at line 3963 of file SkRasterPipeline_opts.h.

3963 {
3964 b = sk_unaligned_load<F>(ctx);
3966}

◆ STAGE_TAIL() [39/75]

SK_OPTS_NS::STAGE_TAIL ( log2_float  ,
F dst 
)

Definition at line 4455 of file SkRasterPipeline_opts.h.

4455{ *dst = approx_log2(*dst); }

◆ STAGE_TAIL() [40/75]

SK_OPTS_NS::STAGE_TAIL ( log_float  ,
F dst 
)

Definition at line 4454 of file SkRasterPipeline_opts.h.

4454{ *dst = approx_log(*dst); }

◆ STAGE_TAIL() [41/75]

SK_OPTS_NS::STAGE_TAIL ( mask_off_loop_mask  ,
NoCtx   
)

Definition at line 3918 of file SkRasterPipeline_opts.h.

3918 {
3919 // We encountered a break statement. If a lane was active, it should be masked off now, and stay
3920 // masked-off until the termination of the loop.
3921 g = sk_bit_cast<F>(sk_bit_cast<I32>(g) & ~execution_mask());
3923}

◆ STAGE_TAIL() [42/75]

SK_OPTS_NS::STAGE_TAIL ( mask_off_return_mask  ,
NoCtx   
)

Definition at line 3972 of file SkRasterPipeline_opts.h.

3972 {
3973 // We encountered a return statement. If a lane was active, it should be masked off now, and
3974 // stay masked-off until the end of the function.
3975 b = sk_bit_cast<F>(sk_bit_cast<I32>(b) & ~execution_mask());
3977}

◆ STAGE_TAIL() [43/75]

SK_OPTS_NS::STAGE_TAIL ( matrix_multiply_2  ,
SkRasterPipeline_MatrixMultiplyCtx packed 
)

Definition at line 4831 of file SkRasterPipeline_opts.h.

4831 {
4832 matrix_multiply<2>(packed, base);
4833}

◆ STAGE_TAIL() [44/75]

SK_OPTS_NS::STAGE_TAIL ( matrix_multiply_3  ,
SkRasterPipeline_MatrixMultiplyCtx packed 
)

Definition at line 4835 of file SkRasterPipeline_opts.h.

4835 {
4836 matrix_multiply<3>(packed, base);
4837}

◆ STAGE_TAIL() [45/75]

SK_OPTS_NS::STAGE_TAIL ( matrix_multiply_4  ,
SkRasterPipeline_MatrixMultiplyCtx packed 
)

Definition at line 4839 of file SkRasterPipeline_opts.h.

4839 {
4840 matrix_multiply<4>(packed, base);
4841}

◆ STAGE_TAIL() [46/75]

SK_OPTS_NS::STAGE_TAIL ( merge_condition_mask  ,
I32 ptr 
)

Definition at line 3897 of file SkRasterPipeline_opts.h.

3897 {
3898 // Set the condition-mask to the intersection of two adjacent masks at the pointer.
3899 r = sk_bit_cast<F>(ptr[0] & ptr[1]);
3901}

◆ STAGE_TAIL() [47/75]

SK_OPTS_NS::STAGE_TAIL ( merge_inv_condition_mask  ,
I32 ptr 
)

Definition at line 3903 of file SkRasterPipeline_opts.h.

3903 {
3904 // Set the condition-mask to the intersection of the first mask and the inverse of the second.
3905 r = sk_bit_cast<F>(ptr[0] & ~ptr[1]);
3907}

◆ STAGE_TAIL() [48/75]

SK_OPTS_NS::STAGE_TAIL ( merge_loop_mask  ,
I32 ptr 
)

Definition at line 3931 of file SkRasterPipeline_opts.h.

3931 {
3932 // Set the loop-mask to the intersection of the current loop-mask with the mask at the pointer.
3933 // (Note: this behavior subtly differs from merge_condition_mask!)
3934 g = sk_bit_cast<F>(sk_bit_cast<I32>(g) & ptr[0]);
3936}

◆ STAGE_TAIL() [49/75]

SK_OPTS_NS::STAGE_TAIL ( reenable_loop_mask  ,
I32 ptr 
)

Definition at line 3925 of file SkRasterPipeline_opts.h.

3925 {
3926 // Set the loop-mask to the union of the current loop-mask with the mask at the pointer.
3927 g = sk_bit_cast<F>(sk_bit_cast<I32>(g) | ptr[0]);
3929}

◆ STAGE_TAIL() [50/75]

SK_OPTS_NS::STAGE_TAIL ( refract_4_floats  ,
F dst 
)

Definition at line 4845 of file SkRasterPipeline_opts.h.

4845 {
4846 // Algorithm adapted from https://registry.khronos.org/OpenGL-Refpages/gl4/html/refract.xhtml
4847 F *incident = dst + 0;
4848 F *normal = dst + 4;
4849 F eta = dst[8];
4850
4851 F dotNI = mad(normal[0], incident[0],
4852 mad(normal[1], incident[1],
4853 mad(normal[2], incident[2],
4854 normal[3] * incident[3])));
4855
4856 F k = 1.0 - eta * eta * (1.0 - dotNI * dotNI);
4857 F sqrt_k = sqrt_(k);
4858
4859 for (int idx = 0; idx < 4; ++idx) {
4860 dst[idx] = if_then_else(k >= 0,
4861 eta * incident[idx] - (eta * dotNI + sqrt_k) * normal[idx],
4862 0.0);
4863 }
4864}

◆ STAGE_TAIL() [51/75]

SK_OPTS_NS::STAGE_TAIL ( set_base_pointer  ,
std::byte *  p 
)

Definition at line 3837 of file SkRasterPipeline_opts.h.

3837 {
3838 base = p;
3839}

◆ STAGE_TAIL() [52/75]

SK_OPTS_NS::STAGE_TAIL ( shuffle  ,
SkRasterPipeline_ShuffleCtx ctx 
)

Definition at line 4243 of file SkRasterPipeline_opts.h.

4243 {
4244 shuffle_fn<16>((std::byte*)ctx->ptr, ctx->offsets, ctx->count);
4245}

◆ STAGE_TAIL() [53/75]

SK_OPTS_NS::STAGE_TAIL ( splat_2_constants  ,
SkRasterPipeline_ConstantCtx packed 
)

Definition at line 4098 of file SkRasterPipeline_opts.h.

4098 {
4099 auto ctx = SkRPCtxUtils::Unpack(packed);
4100 I32* dst = (I32*)(base + ctx.dst);
4101 I32 value = I32_(ctx.value);
4102 dst[0] = dst[1] = value;
4103}

◆ STAGE_TAIL() [54/75]

SK_OPTS_NS::STAGE_TAIL ( splat_3_constants  ,
SkRasterPipeline_ConstantCtx packed 
)

Definition at line 4104 of file SkRasterPipeline_opts.h.

4104 {
4105 auto ctx = SkRPCtxUtils::Unpack(packed);
4106 I32* dst = (I32*)(base + ctx.dst);
4107 I32 value = I32_(ctx.value);
4108 dst[0] = dst[1] = dst[2] = value;
4109}

◆ STAGE_TAIL() [55/75]

SK_OPTS_NS::STAGE_TAIL ( splat_4_constants  ,
SkRasterPipeline_ConstantCtx packed 
)

Definition at line 4110 of file SkRasterPipeline_opts.h.

4110 {
4111 auto ctx = SkRPCtxUtils::Unpack(packed);
4112 I32* dst = (I32*)(base + ctx.dst);
4113 I32 value = I32_(ctx.value);
4114 dst[0] = dst[1] = dst[2] = dst[3] = value;
4115}

◆ STAGE_TAIL() [56/75]

SK_OPTS_NS::STAGE_TAIL ( sqrt_float  ,
F dst 
)

Definition at line 4451 of file SkRasterPipeline_opts.h.

4451{ *dst = sqrt_(*dst); }

◆ STAGE_TAIL() [57/75]

SK_OPTS_NS::STAGE_TAIL ( store_condition_mask  ,
F ctx 
)

Definition at line 3893 of file SkRasterPipeline_opts.h.

3893 {
3894 sk_unaligned_store(ctx, r);
3895}

◆ STAGE_TAIL() [58/75]

SK_OPTS_NS::STAGE_TAIL ( store_device_xy01  ,
F dst 
)

Definition at line 3860 of file SkRasterPipeline_opts.h.

3860 {
3861 // This is very similar to `seed_shader + store_src`, but b/a are backwards.
3862 // (sk_FragCoord actually puts w=1 in the w slot.)
3863 static constexpr float iota[] = {
3864 0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f,
3865 8.5f, 9.5f,10.5f,11.5f,12.5f,13.5f,14.5f,15.5f,
3866 };
3867 static_assert(std::size(iota) >= SkRasterPipeline_kMaxStride_highp);
3868
3869 dst[0] = cast(U32_(dx)) + sk_unaligned_load<F>(iota);
3870 dst[1] = cast(U32_(dy)) + 0.5f;
3871 dst[2] = F0;
3872 dst[3] = F1;
3873}

◆ STAGE_TAIL() [59/75]

SK_OPTS_NS::STAGE_TAIL ( store_loop_mask  ,
F ctx 
)

Definition at line 3914 of file SkRasterPipeline_opts.h.

3914 {
3915 sk_unaligned_store(ctx, g);
3916}

◆ STAGE_TAIL() [60/75]

SK_OPTS_NS::STAGE_TAIL ( store_return_mask  ,
F ctx 
)

Definition at line 3968 of file SkRasterPipeline_opts.h.

3968 {
3969 sk_unaligned_store(ctx, b);
3970}

◆ STAGE_TAIL() [61/75]

SK_OPTS_NS::STAGE_TAIL ( swizzle_1  ,
SkRasterPipeline_SwizzleCtx packed 
)

Definition at line 4231 of file SkRasterPipeline_opts.h.

4231 {
4232 small_swizzle_fn<1>(packed, base);
4233}

◆ STAGE_TAIL() [62/75]

SK_OPTS_NS::STAGE_TAIL ( swizzle_2  ,
SkRasterPipeline_SwizzleCtx packed 
)

Definition at line 4234 of file SkRasterPipeline_opts.h.

4234 {
4235 small_swizzle_fn<2>(packed, base);
4236}

◆ STAGE_TAIL() [63/75]

SK_OPTS_NS::STAGE_TAIL ( swizzle_3  ,
SkRasterPipeline_SwizzleCtx packed 
)

Definition at line 4237 of file SkRasterPipeline_opts.h.

4237 {
4238 small_swizzle_fn<3>(packed, base);
4239}

◆ STAGE_TAIL() [64/75]

SK_OPTS_NS::STAGE_TAIL ( swizzle_4  ,
SkRasterPipeline_SwizzleCtx packed 
)

Definition at line 4240 of file SkRasterPipeline_opts.h.

4240 {
4241 small_swizzle_fn<4>(packed, base);
4242}

◆ STAGE_TAIL() [65/75]

SK_OPTS_NS::STAGE_TAIL ( swizzle_copy_2_slots_masked  ,
SkRasterPipeline_SwizzleCopyCtx ctx 
)

Definition at line 4261 of file SkRasterPipeline_opts.h.

4261 {
4262 swizzle_copy_masked_fn<2>((I32*)ctx->dst, (const I32*)ctx->src, ctx->offsets, execution_mask());
4263}

◆ STAGE_TAIL() [66/75]

SK_OPTS_NS::STAGE_TAIL ( swizzle_copy_3_slots_masked  ,
SkRasterPipeline_SwizzleCopyCtx ctx 
)

Definition at line 4264 of file SkRasterPipeline_opts.h.

4264 {
4265 swizzle_copy_masked_fn<3>((I32*)ctx->dst, (const I32*)ctx->src, ctx->offsets, execution_mask());
4266}

◆ STAGE_TAIL() [67/75]

SK_OPTS_NS::STAGE_TAIL ( swizzle_copy_4_slots_masked  ,
SkRasterPipeline_SwizzleCopyCtx ctx 
)

Definition at line 4267 of file SkRasterPipeline_opts.h.

4267 {
4268 swizzle_copy_masked_fn<4>((I32*)ctx->dst, (const I32*)ctx->src, ctx->offsets, execution_mask());
4269}

◆ STAGE_TAIL() [68/75]

SK_OPTS_NS::STAGE_TAIL ( swizzle_copy_slot_masked  ,
SkRasterPipeline_SwizzleCopyCtx ctx 
)

Definition at line 4258 of file SkRasterPipeline_opts.h.

4258 {
4259 swizzle_copy_masked_fn<1>((I32*)ctx->dst, (const I32*)ctx->src, ctx->offsets, execution_mask());
4260}

◆ STAGE_TAIL() [69/75]

SK_OPTS_NS::STAGE_TAIL ( swizzle_copy_to_indirect_masked  ,
SkRasterPipeline_SwizzleCopyIndirectCtx ctx 
)

Definition at line 4336 of file SkRasterPipeline_opts.h.

4336 {
4337 // Clamp the indirect offsets to stay within the limit.
4338 U32 offsets = *(const U32*)ctx->indirectOffset;
4340
4341 // Scale up the offsets to account for the N lanes per value.
4342 offsets *= N;
4343
4344 // Adjust the offsets forward so that they store into the correct lane.
4345 static constexpr uint32_t iota[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
4346 static_assert(std::size(iota) >= SkRasterPipeline_kMaxStride_highp);
4347 offsets += sk_unaligned_load<U32>(iota);
4348
4349 // Perform indirect, masked, swizzled writes into `dst`.
4350 const I32* src = (const I32*)ctx->src;
4351 const I32* end = src + ctx->slots;
4352 std::byte* dstB = (std::byte*)ctx->dst;
4353 const uint16_t* swizzle = ctx->offsets;
4354 I32 mask = execution_mask();
4355 do {
4356 int* dst = (int*)(dstB + *swizzle);
4357 scatter_masked(*src, dst, offsets, mask);
4358 swizzle += 1;
4359 src += 1;
4360 } while (src != end);
4361}

◆ STAGE_TAIL() [70/75]

SK_OPTS_NS::STAGE_TAIL ( tan_float  ,
F dst 
)

Definition at line 4447 of file SkRasterPipeline_opts.h.

4447{ *dst = tan_(*dst); }

◆ STAGE_TAIL() [71/75]

SK_OPTS_NS::STAGE_TAIL ( trace_enter  ,
SkRasterPipeline_TraceFuncCtx ctx 
)

Definition at line 4015 of file SkRasterPipeline_opts.h.

4015 {
4016 const I32* traceMask = (const I32*)ctx->traceMask;
4017 if (any(execution_mask() & *traceMask)) {
4018 ctx->traceHook->enter(ctx->funcIdx);
4019 }
4020}
virtual void enter(int fnIdx)=0
if(end==-1)

◆ STAGE_TAIL() [72/75]

SK_OPTS_NS::STAGE_TAIL ( trace_exit  ,
SkRasterPipeline_TraceFuncCtx ctx 
)

Definition at line 4022 of file SkRasterPipeline_opts.h.

4022 {
4023 const I32* traceMask = (const I32*)ctx->traceMask;
4024 if (any(execution_mask() & *traceMask)) {
4025 ctx->traceHook->exit(ctx->funcIdx);
4026 }
4027}
virtual void exit(int fnIdx)=0

◆ STAGE_TAIL() [73/75]

SK_OPTS_NS::STAGE_TAIL ( trace_line  ,
SkRasterPipeline_TraceLineCtx ctx 
)

Definition at line 4008 of file SkRasterPipeline_opts.h.

4008 {
4009 const I32* traceMask = (const I32*)ctx->traceMask;
4010 if (any(execution_mask() & *traceMask)) {
4011 ctx->traceHook->line(ctx->lineNumber);
4012 }
4013}
virtual void line(int lineNum)=0

◆ STAGE_TAIL() [74/75]

SK_OPTS_NS::STAGE_TAIL ( trace_scope  ,
SkRasterPipeline_TraceScopeCtx ctx 
)

Definition at line 4029 of file SkRasterPipeline_opts.h.

4029 {
4030 // Note that trace_scope intentionally does not incorporate the execution mask. Otherwise, the
4031 // scopes would become unbalanced if the execution mask changed in the middle of a block. The
4032 // caller is responsible for providing a combined trace- and execution-mask.
4033 const I32* traceMask = (const I32*)ctx->traceMask;
4034 if (any(*traceMask)) {
4035 ctx->traceHook->scope(ctx->delta);
4036 }
4037}
virtual void scope(int delta)=0

◆ STAGE_TAIL() [75/75]

SK_OPTS_NS::STAGE_TAIL ( trace_var  ,
SkRasterPipeline_TraceVarCtx ctx 
)

Definition at line 4039 of file SkRasterPipeline_opts.h.

4039 {
4040 const I32* traceMask = (const I32*)ctx->traceMask;
4041 I32 mask = execution_mask() & *traceMask;
4042 if (any(mask)) {
4043 for (size_t lane = 0; lane < N; ++lane) {
4044 if (select_lane(mask, lane)) {
4045 const I32* data = (const I32*)ctx->data;
4046 int slotIdx = ctx->slotIdx, numSlots = ctx->numSlots;
4047 if (ctx->indirectOffset) {
4048 // If this was an indirect store, apply the indirect-offset to the data pointer.
4049 uint32_t indirectOffset = select_lane(*(const U32*)ctx->indirectOffset, lane);
4050 indirectOffset = std::min<uint32_t>(indirectOffset, ctx->indirectLimit);
4051 data += indirectOffset;
4052 slotIdx += indirectOffset;
4053 }
4054 while (numSlots--) {
4055 ctx->traceHook->var(slotIdx, select_lane(*data, lane));
4056 ++slotIdx;
4057 ++data;
4058 }
4059 break;
4060 }
4061 }
4062 }
4063}
virtual void var(int slot, int32_t val)=0
SI int32_t select_lane(int32_t data, int)

◆ start_pipeline()

static void SK_OPTS_NS::start_pipeline ( size_t  dx,
size_t  dy,
size_t  xlimit,
size_t  ylimit,
SkRasterPipelineStage program,
SkSpan< SkRasterPipeline_MemoryCtxPatch memoryCtxPatches,
uint8_t *  tailPointer 
)
static

Definition at line 1530 of file SkRasterPipeline_opts.h.

1534 {
1535 uint8_t unreferencedTail;
1536 if (!tailPointer) {
1537 tailPointer = &unreferencedTail;
1538 }
1539 auto start = (Stage)program->fn;
1540 const size_t x0 = dx;
1541 std::byte* const base = nullptr;
1542 for (; dy < ylimit; dy++) {
1543 #if JUMPER_NARROW_STAGES
1544 Params params = { x0,dy,base, F0,F0,F0,F0 };
1545 while (params.dx + N <= xlimit) {
1546 start(&params,program, F0,F0,F0,F0);
1547 params.dx += N;
1548 }
1549 if (size_t tail = xlimit - params.dx) {
1550 *tailPointer = tail;
1551 patch_memory_contexts(memoryCtxPatches, params.dx, dy, tail);
1552 start(&params,program, F0,F0,F0,F0);
1553 restore_memory_contexts(memoryCtxPatches, params.dx, dy, tail);
1554 *tailPointer = 0xFF;
1555 }
1556 #else
1557 dx = x0;
1558 while (dx + N <= xlimit) {
1559 start(program,dx,dy,base, F0,F0,F0,F0, F0,F0,F0,F0);
1560 dx += N;
1561 }
1562 if (size_t tail = xlimit - dx) {
1563 *tailPointer = tail;
1564 patch_memory_contexts(memoryCtxPatches, dx, dy, tail);
1565 start(program,dx,dy,base, F0,F0,F0,F0, F0,F0,F0,F0);
1566 restore_memory_contexts(memoryCtxPatches, dx, dy, tail);
1567 *tailPointer = 0xFF;
1568 }
1569 #endif
1570 }
1571}
for(const auto glyph :glyphs)
Definition: FontMgrTest.cpp:52
static void patch_memory_contexts(SkSpan< SkRasterPipeline_MemoryCtxPatch > memoryCtxPatches, size_t dx, size_t dy, size_t tail)
static void restore_memory_contexts(SkSpan< SkRasterPipeline_MemoryCtxPatch > memoryCtxPatches, size_t dx, size_t dy, size_t tail)

◆ store()

template<typename V , typename T >
SI void SK_OPTS_NS::store ( T dst,
V  v 
)

Definition at line 1746 of file SkRasterPipeline_opts.h.

1746 {
1748}

◆ store2()

SI void SK_OPTS_NS::store2 ( uint16_t *  ptr,
U16  r,
U16  g 
)

Definition at line 182 of file SkRasterPipeline_opts.h.

182 {
183 ptr[0] = r;
184 ptr[1] = g;
185 }

◆ store4() [1/2]

SI void SK_OPTS_NS::store4 ( float *  ptr,
F  r,
F  g,
F  b,
F  a 
)

Definition at line 205 of file SkRasterPipeline_opts.h.

205 {
206 ptr[0] = r;
207 ptr[1] = g;
208 ptr[2] = b;
209 ptr[3] = a;
210 }

◆ store4() [2/2]

SI void SK_OPTS_NS::store4 ( uint16_t *  ptr,
U16  r,
U16  g,
U16  b,
U16  a 
)

Definition at line 192 of file SkRasterPipeline_opts.h.

192 {
193 ptr[0] = r;
194 ptr[1] = g;
195 ptr[2] = b;
196 ptr[3] = a;
197 }

◆ strip_sign()

SI F SK_OPTS_NS::strip_sign ( F  x,
U32 sign 
)

Definition at line 2719 of file SkRasterPipeline_opts.h.

2719 {
2720 U32 bits = sk_bit_cast<U32>(x);
2721 *sign = bits & 0x80000000;
2722 return sk_bit_cast<F>(bits ^ *sign);
2723}

◆ sub_fn()

template<typename T >
SI void SK_OPTS_NS::sub_fn ( T dst,
T src 
)

Definition at line 4574 of file SkRasterPipeline_opts.h.

4574 {
4575 *dst -= *src;
4576}

◆ swizzle_copy_masked_fn()

template<int NumSlots>
SI void SK_OPTS_NS::swizzle_copy_masked_fn ( I32 dst,
const I32 src,
uint16_t *  offsets,
I32  mask 
)

Definition at line 4248 of file SkRasterPipeline_opts.h.

4248 {
4249 std::byte* dstB = (std::byte*)dst;
4250 SK_UNROLL for (int count = 0; count < NumSlots; ++count) {
4251 I32* dstS = (I32*)(dstB + *offsets);
4252 *dstS = if_then_else(mask, *src, *dstS);
4253 offsets += 1;
4254 src += 1;
4255 }
4256}

◆ tan_()

SI F SK_OPTS_NS::tan_ ( F  x)

Definition at line 1885 of file SkRasterPipeline_opts.h.

1885 {
1886 constexpr float Pi = SK_FloatPI;
1887 // periodic between -pi/2 ... pi/2
1888 // shift to 0...Pi, scale 1/Pi to get into 0...1, then fract, scale-up, shift-back
1889 x = mad(fract(mad(x, 1/Pi, 0.5f)), Pi, -Pi/2);
1890
1891 I32 neg = (x < 0.0f);
1892 x = if_then_else(neg, -x, x);
1893
1894 // minimize total error by shifting if x > pi/8
1895 I32 use_quotient = (x > (Pi/8));
1896 x = if_then_else(use_quotient, x - (Pi/4), x);
1897
1898 // 9th order poly = 4th order(x^2) * x
1899 const float c4 = 62 / 2835.0f;
1900 const float c3 = 17 / 315.0f;
1901 const float c2 = 2 / 15.0f;
1902 const float c1 = 1 / 3.0f;
1903 const float c0 = 1.0f;
1904 F x2 = x * x;
1905 x *= mad(x2, mad(x2, mad(x2, mad(x2, c4, c3), c2), c1), c0);
1906 x = if_then_else(use_quotient, (1+x)/(1-x), x);
1907 x = if_then_else(neg, -x, x);
1908 return x;
1909}

◆ to_half()

SI U16 SK_OPTS_NS::to_half ( F  f)

Definition at line 1422 of file SkRasterPipeline_opts.h.

1422 {
1423#if defined(JUMPER_IS_NEON) && defined(SK_CPU_ARM64)
1424 return (U16)vcvt_f16_f32(f);
1425
1426#elif defined(JUMPER_IS_SKX)
1427 return (U16)_mm512_cvtps_ph(f, _MM_FROUND_CUR_DIRECTION);
1428
1429#elif defined(JUMPER_IS_HSW)
1430 return (U16)_mm256_cvtps_ph(f, _MM_FROUND_CUR_DIRECTION);
1431
1432#else
1433 // Remember, a float is 1-8-23 (sign-exponent-mantissa) with 127 exponent bias.
1434 U32 sem = sk_bit_cast<U32>(f),
1435 s = sem & 0x80000000,
1436 em = sem ^ s;
1437
1438 // Convert to 1-5-10 half with 15 bias, flushing denorm halfs (including zero) to zero.
1439 auto denorm = (I32)em < 0x38800000; // I32 comparison is often quicker, and always safe here.
1440 return pack((U32)if_then_else(denorm, I32_(0)
1441 , (I32)((s>>16) + (em>>13) - ((127-15)<<10))));
1442#endif
1443}

◆ to_unorm()

SI U32 SK_OPTS_NS::to_unorm ( F  v,
float  scale,
float  bias = 1.0f 
)

Definition at line 2005 of file SkRasterPipeline_opts.h.

2005 {
2006 // Any time we use round() we probably want to use to_unorm().
2007 return round(min(max(0.0f, v), bias), F_(scale));
2008}

◆ trunc_()

SI U32 SK_OPTS_NS::trunc_ ( F  v)

Definition at line 1339 of file SkRasterPipeline_opts.h.

1339{ return (U32)v; }

◆ two()

SI F SK_OPTS_NS::two ( F  x)

Definition at line 2172 of file SkRasterPipeline_opts.h.

2172{ return x + x; }

◆ U32_()

SI constexpr U32 SK_OPTS_NS::U32_ ( uint32_t  x)
constexpr

Definition at line 1301 of file SkRasterPipeline_opts.h.

1301{ return x; }

Variable Documentation

◆ dst

F * SK_OPTS_NS::dst { *dst = sin_(*dst)

Definition at line 4445 of file SkRasterPipeline_opts.h.

◆ F0

constexpr F SK_OPTS_NS::F0 = F_(0.0f)
staticconstexpr

Definition at line 1309 of file SkRasterPipeline_opts.h.

◆ F1

constexpr F SK_OPTS_NS::F1 = F_(1.0f)
staticconstexpr

Definition at line 1310 of file SkRasterPipeline_opts.h.

◆ N

constexpr size_t SK_OPTS_NS::N = sizeof(F) / sizeof(float)
staticconstexpr

Definition at line 1491 of file SkRasterPipeline_opts.h.

◆ S32_alpha_D32_filter_DXDY

constexpr void(* SK_OPTS_NS::S32_alpha_D32_filter_DXDY) (const SkBitmapProcState &, const uint32_t *, int, SkPMColor *) ( const SkBitmapProcState ,
const uint32_t *  ,
int  ,
SkPMColor *  
) = nullptr
staticconstexpr

Definition at line 574 of file SkBitmapProcState_opts.h.