Flutter Engine Uber Docs
Docs for the entire Flutter Engine repo.
 
reflector.cc
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// FLUTTER_NOLINT: https://github.com/flutter/flutter/issues/105732

#include "impeller/compiler/reflector.h"

#include <atomic>
#include <format>
#include <iostream>
#include <optional>
#include <set>
#include <sstream>

#include "flutter/fml/logging.h"
#include "fml/backtrace.h"
// NOTE: The project-local includes between these lines were elided in the
// archived listing; the set below is inferred from the symbols this file
// uses and may not match the upstream list exactly.
#include "impeller/base/validation.h"
#include "impeller/compiler/code_gen_template.h"
#include "impeller/compiler/types.h"
#include "impeller/compiler/uniform_sorter.h"
#include "impeller/compiler/utilities.h"
#include "impeller/geometry/half.h"
#include "impeller/geometry/matrix.h"
#include "impeller/geometry/point.h"
#include "impeller/geometry/scalar.h"
#include "impeller/geometry/vector.h"
#include "impeller/runtime_stage/runtime_stage.h"
#include "inja/inja.hpp"
#include "runtime_stage_types_flatbuffers.h"
#include "spirv_common.hpp"

namespace impeller {
namespace compiler {

static std::string ExecutionModelToString(spv::ExecutionModel model) {
  switch (model) {
    case spv::ExecutionModel::ExecutionModelVertex:
      return "vertex";
    case spv::ExecutionModel::ExecutionModelFragment:
      return "fragment";
    case spv::ExecutionModel::ExecutionModelGLCompute:
      return "compute";
    default:
      return "unsupported";
  }
}

static std::string StringToShaderStage(const std::string& str) {
  if (str == "vertex") {
    return "ShaderStage::kVertex";
  }

  if (str == "fragment") {
    return "ShaderStage::kFragment";
  }

  if (str == "compute") {
    return "ShaderStage::kCompute";
  }

  return "ShaderStage::kUnknown";
}

Reflector::Reflector(Options options,
                     const std::shared_ptr<const spirv_cross::ParsedIR>& ir,
                     const std::shared_ptr<fml::Mapping>& shader_data,
                     const CompilerBackend& compiler)
    : options_(std::move(options)),
      ir_(ir),
      shader_data_(shader_data),
      compiler_(compiler) {
  if (!ir_ || !compiler_) {
    return;
  }

  if (auto template_arguments = GenerateTemplateArguments();
      template_arguments.has_value()) {
    template_arguments_ =
        std::make_unique<nlohmann::json>(std::move(template_arguments.value()));
  } else {
    return;
  }

  reflection_header_ = GenerateReflectionHeader();
  if (!reflection_header_) {
    return;
  }

  reflection_cc_ = GenerateReflectionCC();
  if (!reflection_cc_) {
    return;
  }

  runtime_stage_shader_ = GenerateRuntimeStageData();

  shader_bundle_data_ = GenerateShaderBundleData();
  if (!shader_bundle_data_) {
    return;
  }

  is_valid_ = true;
}

Reflector::~Reflector() = default;

bool Reflector::IsValid() const {
  return is_valid_;
}

std::shared_ptr<fml::Mapping> Reflector::GetReflectionJSON() const {
  if (!is_valid_) {
    return nullptr;
  }

  auto json_string =
      std::make_shared<std::string>(template_arguments_->dump(2u));

  return std::make_shared<fml::NonOwnedMapping>(
      reinterpret_cast<const uint8_t*>(json_string->data()),
      json_string->size(), [json_string](auto, auto) {});
}

std::shared_ptr<fml::Mapping> Reflector::GetReflectionHeader() const {
  return reflection_header_;
}

std::shared_ptr<fml::Mapping> Reflector::GetReflectionCC() const {
  return reflection_cc_;
}

std::shared_ptr<RuntimeStageData::Shader> Reflector::GetRuntimeStageShaderData()
    const {
  return runtime_stage_shader_;
}

std::shared_ptr<ShaderBundleData> Reflector::GetShaderBundleData() const {
  return shader_bundle_data_;
}

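// The JSON object built below is the data model handed to the inja templates
// (and exposed verbatim via GetReflectionJSON()). Illustrative sketch of its
// top-level shape, inferred from the code that follows:
//
//   {
//     "entrypoint": "...", "shader_name": "...", "shader_stage": "vertex",
//     "header_file_name": "...",
//     "subpass_inputs": [...], "buffers": [...], "uniforms": [...],
//     "stage_inputs": [...], "sampled_images": [...], "stage_outputs": [...],
//     "struct_definitions": [...], "bind_prototypes": [...]
//   }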
std::optional<nlohmann::json> Reflector::GenerateTemplateArguments() const {
  nlohmann::json root;

  const auto& entrypoints = compiler_->get_entry_points_and_stages();
  if (entrypoints.size() != 1) {
    VALIDATION_LOG << "Incorrect number of entrypoints in the shader. Found "
                   << entrypoints.size() << " but expected 1.";
    return std::nullopt;
  }

  auto execution_model = entrypoints.front().execution_model;
  {
    root["entrypoint"] = options_.entry_point_name;
    root["shader_name"] = options_.shader_name;
    root["shader_stage"] = ExecutionModelToString(execution_model);
    root["header_file_name"] = options_.header_file_name;
  }

  const auto shader_resources = compiler_->get_shader_resources();

  // Subpass Inputs.
  {
    auto& subpass_inputs = root["subpass_inputs"] = nlohmann::json::array_t{};
    if (auto subpass_inputs_json =
            ReflectResources(shader_resources.subpass_inputs);
        subpass_inputs_json.has_value()) {
      for (auto subpass_input : subpass_inputs_json.value()) {
        subpass_input["descriptor_type"] = "DescriptorType::kInputAttachment";
        subpass_inputs.emplace_back(std::move(subpass_input));
      }
    } else {
      return std::nullopt;
    }
  }

  // Uniform and storage buffers.
  {
    auto& buffers = root["buffers"] = nlohmann::json::array_t{};
    if (auto uniform_buffers_json =
            ReflectResources(shader_resources.uniform_buffers);
        uniform_buffers_json.has_value()) {
      for (auto uniform_buffer : uniform_buffers_json.value()) {
        uniform_buffer["descriptor_type"] = "DescriptorType::kUniformBuffer";
        buffers.emplace_back(std::move(uniform_buffer));
      }
    } else {
      return std::nullopt;
    }
    if (auto storage_buffers_json =
            ReflectResources(shader_resources.storage_buffers);
        storage_buffers_json.has_value()) {
      for (auto storage_buffer : storage_buffers_json.value()) {
        storage_buffer["descriptor_type"] = "DescriptorType::kStorageBuffer";
        buffers.emplace_back(std::move(storage_buffer));
      }
    } else {
      return std::nullopt;
    }
  }

  {
    auto& uniforms = root["uniforms"] = nlohmann::json::array_t{};
    if (auto uniforms_json =
            ReflectResources(shader_resources.gl_plain_uniforms);
        uniforms_json.has_value()) {
      for (auto uniform : uniforms_json.value()) {
        uniform["descriptor_type"] = "DescriptorType::kUniform";
        uniforms.emplace_back(std::move(uniform));
      }
    } else {
      return std::nullopt;
    }
  }

  {
    auto& stage_inputs = root["stage_inputs"] = nlohmann::json::array_t{};
    if (auto stage_inputs_json = ReflectResources(
            shader_resources.stage_inputs,
            /*compute_offsets=*/execution_model == spv::ExecutionModelVertex);
        stage_inputs_json.has_value()) {
      stage_inputs = std::move(stage_inputs_json.value());
    } else {
      return std::nullopt;
    }
  }

  {
    auto combined_sampled_images =
        ReflectResources(shader_resources.sampled_images);
    auto images = ReflectResources(shader_resources.separate_images);
    auto samplers = ReflectResources(shader_resources.separate_samplers);
    if (!combined_sampled_images.has_value() || !images.has_value() ||
        !samplers.has_value()) {
      return std::nullopt;
    }
    auto& sampled_images = root["sampled_images"] = nlohmann::json::array_t{};
    for (auto value : combined_sampled_images.value()) {
      value["descriptor_type"] = "DescriptorType::kSampledImage";
      sampled_images.emplace_back(std::move(value));
    }
    for (auto value : images.value()) {
      value["descriptor_type"] = "DescriptorType::kImage";
      sampled_images.emplace_back(std::move(value));
    }
    for (auto value : samplers.value()) {
      value["descriptor_type"] = "DescriptorType::kSampledSampler";
      sampled_images.emplace_back(std::move(value));
    }
  }

  if (auto stage_outputs = ReflectResources(shader_resources.stage_outputs);
      stage_outputs.has_value()) {
    root["stage_outputs"] = std::move(stage_outputs.value());
  } else {
    return std::nullopt;
  }

  {
    auto& struct_definitions = root["struct_definitions"] =
        nlohmann::json::array_t{};
    if (entrypoints.front().execution_model ==
            spv::ExecutionModel::ExecutionModelVertex &&
        !shader_resources.stage_inputs.empty()) {
      if (auto struc =
              ReflectPerVertexStructDefinition(shader_resources.stage_inputs);
          struc.has_value()) {
        struct_definitions.emplace_back(EmitStructDefinition(struc.value()));
      } else {
        // If there are stage inputs, it is an error to not generate a per
        // vertex data struct for a vertex like shader stage.
        return std::nullopt;
      }
    }

    std::set<spirv_cross::ID> known_structs;
    ir_->for_each_typed_id<spirv_cross::SPIRType>(
        [&](uint32_t, const spirv_cross::SPIRType& type) {
          if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
            return;
          }
          // Skip structs that do not have layout offset decorations.
          // These structs are used internally within the shader and are not
          // part of the shader's interface.
          for (size_t i = 0; i < type.member_types.size(); i++) {
            if (!compiler_->has_member_decoration(type.self, i,
                                                  spv::DecorationOffset)) {
              return;
            }
          }
          if (known_structs.find(type.self) != known_structs.end()) {
            // Iterating over types this way leads to duplicates which may
            // cause duplicate struct definitions.
            return;
          }
          known_structs.insert(type.self);
          if (auto struc = ReflectStructDefinition(type.self);
              struc.has_value()) {
            struct_definitions.emplace_back(
                EmitStructDefinition(struc.value()));
          }
        });
  }

  root["bind_prototypes"] =
      EmitBindPrototypes(shader_resources, execution_model);

  return root;
}

std::shared_ptr<fml::Mapping> Reflector::GenerateReflectionHeader() const {
  return InflateTemplate(kReflectionHeaderTemplate);
}

std::shared_ptr<fml::Mapping> Reflector::GenerateReflectionCC() const {
  return InflateTemplate(kReflectionCCTemplate);
}

// Maps the compiler's target platform to the runtime stage backend the shader
// targets, returning std::nullopt for platforms without one. NOTE: the body
// of this helper was elided in the archived listing; only its signature is
// recoverable.
static std::optional<RuntimeStageBackend> GetRuntimeStageBackend(
    TargetPlatform target_platform);

std::shared_ptr<RuntimeStageData::Shader> Reflector::GenerateRuntimeStageData()
    const {
  auto backend = GetRuntimeStageBackend(options_.target_platform);
  if (!backend.has_value()) {
    return nullptr;
  }

  const auto& entrypoints = compiler_->get_entry_points_and_stages();
  if (entrypoints.size() != 1u) {
    VALIDATION_LOG << "Single entrypoint not found.";
    return nullptr;
  }
  auto data = std::make_unique<RuntimeStageData::Shader>();
  data->entrypoint = options_.entry_point_name;
  data->stage = entrypoints.front().execution_model;
  data->shader = shader_data_;
  data->backend = backend.value();

  // Sort the IR so that the uniforms are in declaration order.
  std::vector<spirv_cross::ID> uniforms =
      SortUniforms(ir_.get(), compiler_.GetCompiler());
  for (auto& sorted_id : uniforms) {
    auto var = ir_->ids[sorted_id].get<spirv_cross::SPIRVariable>();
    const auto spir_type = compiler_->get_type(var.basetype);
    UniformDescription uniform_description;
    uniform_description.name = compiler_->get_name(var.self);
    uniform_description.location = compiler_->get_decoration(
        var.self, spv::Decoration::DecorationLocation);
    uniform_description.binding =
        compiler_->get_decoration(var.self, spv::Decoration::DecorationBinding);
    uniform_description.type = spir_type.basetype;
    uniform_description.rows = spir_type.vecsize;
    uniform_description.columns = spir_type.columns;
    uniform_description.bit_width = spir_type.width;
    uniform_description.array_elements = GetArrayElements(spir_type);

    // NOTE: The first half of this condition was elided in the archived
    // listing; gating on the Metal backend is assumed from the comments
    // below.
    if (data->backend == RuntimeStageBackend::kMetal &&
        uniform_description.type == spirv_cross::SPIRType::BaseType::Float) {
      // Metal aligns float3 to 16 bytes.
      // Metal aligns float3x3 COLUMNS to 16 bytes.
      // For float3: Size 12. Padding 4. Stride 16.
      // For float3x3: Size 36. Padding 12 (4 per col). Stride 48.
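      // e.g. a float3x3 thus records the padding layout
      // [F, F, F, P,  F, F, F, P,  F, F, F, P] (F = kFloat, P = kPadding),
      // which is exactly what the loops below push.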

      if (spir_type.vecsize == 3 &&
          (spir_type.columns == 1 || spir_type.columns == 3)) {
        for (size_t c = 0; c < spir_type.columns; c++) {
          for (size_t v = 0; v < 3; v++) {
            uniform_description.padding_layout.push_back(
                fb::PaddingType::kFloat);
          }
          uniform_description.padding_layout.push_back(
              fb::PaddingType::kPadding);
        }
      }
    }

    // NOTE: The first line of this check was elided in the archived listing;
    // an FML_CHECK gated on the Vulkan backend is assumed from the message.
    FML_CHECK(data->backend != RuntimeStageBackend::kVulkan ||
              spir_type.basetype ==
                  spirv_cross::SPIRType::BaseType::SampledImage)
        << "Vulkan runtime effect had unexpected uniforms outside of the "
           "uniform buffer object.";
    data->uniforms.emplace_back(std::move(uniform_description));
  }

  const auto ubos = compiler_->get_shader_resources().uniform_buffers;
  if (data->backend == RuntimeStageBackend::kVulkan && !ubos.empty()) {
    if (ubos.size() != 1 || ubos[0].name != RuntimeStage::kVulkanUBOName) {
      VALIDATION_LOG << "Expected a single UBO resource named '"
                     << RuntimeStage::kVulkanUBOName
                     << "' for Vulkan runtime stage backend.";
      return nullptr;
    }

    const auto& ubo = ubos[0];

    size_t binding =
        compiler_->get_decoration(ubo.id, spv::Decoration::DecorationBinding);
    auto members = ReadStructMembers(ubo.type_id);
    std::vector<fb::PaddingType> padding_layout;
    std::vector<StructField> struct_fields;
    struct_fields.reserve(members.size());
    size_t float_count = 0;

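    // The padding_layout vector encodes the UBO at 4-byte-word granularity:
    // one kFloat entry per word of data and one kPadding entry per word of
    // filler. Illustrative example: a UBO holding { vec2 u_size; float
    // u_time; } plus one trailing pad word encodes as [F, F, F, P].
    // (u_size and u_time are hypothetical names.)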
    for (size_t i = 0; i < members.size(); i += 1) {
      const auto& member = members[i];
      std::vector<int> bytes;
      // NOTE: The three case labels below were elided in the archived
      // listing; the StructMember::UnderlyingType enumerator names are
      // assumed from the surrounding logic.
      switch (member.underlying_type) {
        case StructMember::UnderlyingType::kPadding: {
          size_t padding_count =
              (member.size + sizeof(float) - 1) / sizeof(float);
          while (padding_count > 0) {
            padding_layout.push_back(fb::PaddingType::kPadding);
            padding_count--;
          }
          break;
        }
        case StructMember::UnderlyingType::kFloat: {
          StructField field_desc;
          field_desc.name = member.name;
          field_desc.byte_size =
              member.size * member.array_elements.value_or(1);
          struct_fields.push_back(field_desc);
          if (member.array_elements > 1) {
            // For each array element, insert one kFloat entry per float of
            // data and one kPadding entry per float of element padding.
            for (auto i = 0; i < member.array_elements; i++) {
              for (auto j = 0u; j < member.size / sizeof(float); j++) {
                padding_layout.push_back(fb::PaddingType::kFloat);
              }
              for (auto j = 0u; j < member.element_padding / sizeof(float);
                   j++) {
                padding_layout.push_back(fb::PaddingType::kPadding);
              }
            }
          } else {
            size_t member_float_count = member.byte_length / sizeof(float);
            float_count += member_float_count;
            while (member_float_count > 0) {
              padding_layout.push_back(fb::PaddingType::kFloat);
              member_float_count--;
            }
          }
          break;
        }
        case StructMember::UnderlyingType::kOther:
          VALIDATION_LOG << "Non-floating-type struct member " << member.name
                         << " is not supported.";
          return nullptr;
      }
    }
    data->uniforms.emplace_back(UniformDescription{
        .name = ubo.name,
        .location = binding,
        .binding = binding,
        .type = spirv_cross::SPIRType::Struct,
        .padding_layout = std::move(padding_layout),
        .struct_fields = std::move(struct_fields),
        .struct_float_count = float_count,
    });
  }

  // We only need to worry about storing vertex attributes.
  if (entrypoints.front().execution_model == spv::ExecutionModelVertex) {
    const auto inputs = compiler_->get_shader_resources().stage_inputs;
    auto input_offsets = ComputeOffsets(inputs);
    for (const auto& input : inputs) {
      std::optional<size_t> offset = GetOffset(input.id, input_offsets);

      const auto type = compiler_->get_type(input.type_id);

      InputDescription input_description;
      input_description.name = input.name;
      input_description.location = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationLocation);
      input_description.set = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationDescriptorSet);
      input_description.binding = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationBinding);
      input_description.type = type.basetype;
      input_description.bit_width = type.width;
      input_description.vec_size = type.vecsize;
      input_description.columns = type.columns;
      input_description.offset = offset.value_or(0u);
      data->inputs.emplace_back(std::move(input_description));
    }
  }

  return data;
}

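// Builds the metadata consumed by Flutter GPU shader bundles: for each
// uniform struct it records per-field offsets and sizes, and for vertex
// shaders it records the stage inputs. Illustrative example (hypothetical
// shader): a uniform declared as
//
//   uniform FrameInfo { mat4 mvp; };
//
// yields a ShaderUniformStruct named "FrameInfo" with a single field whose
// total_size_in_bytes is 64.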
std::shared_ptr<ShaderBundleData> Reflector::GenerateShaderBundleData() const {
  const auto& entrypoints = compiler_->get_entry_points_and_stages();
  if (entrypoints.size() != 1u) {
    VALIDATION_LOG << "Single entrypoint not found.";
    return nullptr;
  }
  auto data = std::make_shared<ShaderBundleData>(
      options_.entry_point_name,            //
      entrypoints.front().execution_model,  //
      options_.target_platform              //
  );
  data->SetShaderData(shader_data_);

  const auto uniforms = compiler_->get_shader_resources().uniform_buffers;
  for (const auto& uniform : uniforms) {
    ShaderBundleData::ShaderUniformStruct uniform_struct;
    uniform_struct.name = uniform.name;
    // NOTE: The argument list on the next call was elided in the archived
    // listing; the kPrimary index and the resource ID are assumed.
    uniform_struct.ext_res_0 = compiler_.GetExtendedMSLResourceBinding(
        CompilerBackend::ExtendedResourceIndex::kPrimary, uniform.id);
    uniform_struct.set = compiler_->get_decoration(
        uniform.id, spv::Decoration::DecorationDescriptorSet);
    uniform_struct.binding = compiler_->get_decoration(
        uniform.id, spv::Decoration::DecorationBinding);

    const auto type = compiler_->get_type(uniform.type_id);
    if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
      std::cerr << "Error: Uniform \"" << uniform.name
                << "\" is not a struct. All Flutter GPU shader uniforms must "
                   "be structs."
                << std::endl;
      return nullptr;
    }

    size_t size_in_bytes = 0;
    for (const auto& struct_member : ReadStructMembers(uniform.type_id)) {
      size_in_bytes += struct_member.byte_length;
      if (StringStartsWith(struct_member.name, "_PADDING_")) {
        continue;
      }
      ShaderBundleData::ShaderUniformStructField uniform_struct_field;
      uniform_struct_field.name = struct_member.name;
      uniform_struct_field.type = struct_member.base_type;
      uniform_struct_field.offset_in_bytes = struct_member.offset;
      uniform_struct_field.element_size_in_bytes = struct_member.size;
      uniform_struct_field.total_size_in_bytes = struct_member.byte_length;
      uniform_struct_field.array_elements = struct_member.array_elements;
      uniform_struct.fields.push_back(uniform_struct_field);
    }
    uniform_struct.size_in_bytes = size_in_bytes;

    data->AddUniformStruct(uniform_struct);
  }

  const auto sampled_images = compiler_->get_shader_resources().sampled_images;
  for (const auto& image : sampled_images) {
    ShaderBundleData::ShaderUniformTexture uniform_texture;
    uniform_texture.name = image.name;
    uniform_texture.ext_res_0 = compiler_.GetExtendedMSLResourceBinding(
        CompilerBackend::ExtendedResourceIndex::kPrimary, image.id);
    uniform_texture.set = compiler_->get_decoration(
        image.id, spv::Decoration::DecorationDescriptorSet);
    uniform_texture.binding =
        compiler_->get_decoration(image.id, spv::Decoration::DecorationBinding);
    data->AddUniformTexture(uniform_texture);
  }

  // We only need to worry about storing vertex attributes.
  if (entrypoints.front().execution_model == spv::ExecutionModelVertex) {
    const auto inputs = compiler_->get_shader_resources().stage_inputs;
    auto input_offsets = ComputeOffsets(inputs);
    for (const auto& input : inputs) {
      std::optional<size_t> offset = GetOffset(input.id, input_offsets);

      const auto type = compiler_->get_type(input.type_id);

      InputDescription input_description;
      input_description.name = input.name;
      input_description.location = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationLocation);
      input_description.set = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationDescriptorSet);
      input_description.binding = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationBinding);
      input_description.type = type.basetype;
      input_description.bit_width = type.width;
      input_description.vec_size = type.vecsize;
      input_description.columns = type.columns;
      input_description.offset = offset.value_or(0u);
      data->AddInputDescription(std::move(input_description));
    }
  }

  return data;
}

std::optional<uint32_t> Reflector::GetArrayElements(
    const spirv_cross::SPIRType& type) const {
  if (type.array.empty()) {
    return std::nullopt;
  }
  FML_CHECK(type.array.size() == 1)
      << "Multi-dimensional arrays are not supported.";
  FML_CHECK(type.array_size_literal.front())
      << "Must use a literal for array sizes.";
  return type.array.front();
}

// NOTE: The case labels in this switch were elided in the archived listing;
// the CompilerBackend::Type enumerator names below are assumed.
static std::string ToString(CompilerBackend::Type type) {
  switch (type) {
    case CompilerBackend::Type::kMSL:
      return "Metal Shading Language";
    case CompilerBackend::Type::kGLSL:
      return "OpenGL Shading Language";
    case CompilerBackend::Type::kGLSLVulkan:
      return "OpenGL Shading Language (Relaxed Vulkan Semantics)";
    case CompilerBackend::Type::kSkSL:
      return "SkSL Shading Language";
  }
  FML_UNREACHABLE();
}

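// Renders one of the reflection templates against the JSON assembled by
// GenerateTemplateArguments(). The callbacks registered below are callable
// from inside the inja templates; an illustrative (hypothetical) template
// fragment:
//
//   struct {{ camel_case(shader_name) }}Shader {
//     static constexpr auto kStage = {{ to_shader_stage(shader_stage) }};
//   };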
std::shared_ptr<fml::Mapping> Reflector::InflateTemplate(
    std::string_view tmpl) const {
  inja::Environment env;
  env.set_trim_blocks(true);
  env.set_lstrip_blocks(true);

  env.add_callback("camel_case", 1u, [](inja::Arguments& args) {
    return ToCamelCase(args.at(0u)->get<std::string>());
  });

  env.add_callback("to_shader_stage", 1u, [](inja::Arguments& args) {
    return StringToShaderStage(args.at(0u)->get<std::string>());
  });

  env.add_callback("get_generator_name", 0u,
                   [type = compiler_.GetType()](inja::Arguments& args) {
                     return ToString(type);
                   });

  auto inflated_template =
      std::make_shared<std::string>(env.render(tmpl, *template_arguments_));

  return std::make_shared<fml::NonOwnedMapping>(
      reinterpret_cast<const uint8_t*>(inflated_template->data()),
      inflated_template->size(), [inflated_template](auto, auto) {});
}

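// Computes per-location byte offsets for stage inputs as an exclusive prefix
// sum over the attribute sizes. Worked example: three attributes at locations
// 0..2 with sizes {8, 8, 12} first become {8, 16, 28} after the prefix sum,
// then shift down to the final offsets {0, 8, 16}.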
std::vector<size_t> Reflector::ComputeOffsets(
    const spirv_cross::SmallVector<spirv_cross::Resource>& resources) const {
  std::vector<size_t> offsets(resources.size(), 0);
  if (resources.size() == 0) {
    return offsets;
  }
  for (const auto& resource : resources) {
    const auto type = compiler_->get_type(resource.type_id);
    auto location = compiler_->get_decoration(
        resource.id, spv::Decoration::DecorationLocation);
    // Malformed shader, will be caught later on.
    if (location >= resources.size()) {
      location = 0;
    }
    offsets[location] = (type.width * type.vecsize) / 8;
  }
  for (size_t i = 1; i < resources.size(); i++) {
    offsets[i] += offsets[i - 1];
  }
  for (size_t i = resources.size() - 1; i > 0; i--) {
    offsets[i] = offsets[i - 1];
  }
  offsets[0] = 0;

  return offsets;
}

std::optional<size_t> Reflector::GetOffset(
    spirv_cross::ID id,
    const std::vector<size_t>& offsets) const {
  uint32_t location =
      compiler_->get_decoration(id, spv::Decoration::DecorationLocation);
  if (location >= offsets.size()) {
    return std::nullopt;
  }
  return offsets[location];
}

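// Emits one JSON object per shader resource. Illustrative sketch of the shape
// produced for a combined image sampler (names and values hypothetical):
//
//   {"name": "u_texture", "descriptor_set": 0, "binding": 1, "set": 0,
//    "location": 0, "index": 0, "ext_res_0": 0, "ext_res_1": 0,
//    "relaxed_precision": false, "offset": 0, "type": {...}}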
std::optional<nlohmann::json::object_t> Reflector::ReflectResource(
    const spirv_cross::Resource& resource,
    std::optional<size_t> offset) const {
  nlohmann::json::object_t result;

  result["name"] = resource.name;
  result["descriptor_set"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationDescriptorSet);
  result["binding"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationBinding);
  result["set"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationDescriptorSet);
  result["location"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationLocation);
  result["index"] =
      compiler_->get_decoration(resource.id, spv::Decoration::DecorationIndex);
  // NOTE: The argument lists on the next two calls were elided in the
  // archived listing; the kPrimary/kSecondary indices are assumed.
  result["ext_res_0"] = compiler_.GetExtendedMSLResourceBinding(
      CompilerBackend::ExtendedResourceIndex::kPrimary, resource.id);
  result["ext_res_1"] = compiler_.GetExtendedMSLResourceBinding(
      CompilerBackend::ExtendedResourceIndex::kSecondary, resource.id);
  result["relaxed_precision"] =
      compiler_->get_decoration(
          resource.id, spv::Decoration::DecorationRelaxedPrecision) == 1;
  result["offset"] = offset.value_or(0u);
  auto type = ReflectType(resource.type_id);
  if (!type.has_value()) {
    return std::nullopt;
  }
  result["type"] = std::move(type.value());
  return result;
}

std::optional<nlohmann::json::object_t> Reflector::ReflectType(
    const spirv_cross::TypeID& type_id) const {
  nlohmann::json::object_t result;

  const auto type = compiler_->get_type(type_id);

  result["type_name"] = StructMember::BaseTypeToString(type.basetype);
  result["bit_width"] = type.width;
  result["vec_size"] = type.vecsize;
  result["columns"] = type.columns;
  auto& members = result["members"] = nlohmann::json::array_t{};
  if (type.basetype == spirv_cross::SPIRType::BaseType::Struct) {
    for (const auto& struct_member : ReadStructMembers(type_id)) {
      auto member = nlohmann::json::object_t{};
      member["name"] = struct_member.name;
      member["type"] = struct_member.type;
      member["base_type"] =
          StructMember::BaseTypeToString(struct_member.base_type);
      member["offset"] = struct_member.offset;
      member["size"] = struct_member.size;
      member["byte_length"] = struct_member.byte_length;
      if (struct_member.array_elements.has_value()) {
        member["array_elements"] = struct_member.array_elements.value();
      } else {
        member["array_elements"] = "std::nullopt";
      }
      members.emplace_back(std::move(member));
    }
  }

  return result;
}

std::optional<nlohmann::json::array_t> Reflector::ReflectResources(
    const spirv_cross::SmallVector<spirv_cross::Resource>& resources,
    bool compute_offsets) const {
  nlohmann::json::array_t result;
  result.reserve(resources.size());
  std::vector<size_t> offsets;
  if (compute_offsets) {
    offsets = ComputeOffsets(resources);
  }
  for (const auto& resource : resources) {
    std::optional<size_t> maybe_offset = std::nullopt;
    if (compute_offsets) {
      maybe_offset = GetOffset(resource.id, offsets);
    }
    if (auto reflected = ReflectResource(resource, maybe_offset);
        reflected.has_value()) {
      result.emplace_back(std::move(reflected.value()));
    } else {
      return std::nullopt;
    }
  }
  return result;
}

static std::string TypeNameWithPaddingOfSize(size_t size) {
  std::stringstream stream;
  stream << "Padding<" << size << ">";
  return stream.str();
}

struct KnownType {
  std::string name;
  size_t byte_size = 0;
};

static std::optional<KnownType> ReadKnownScalarType(
    spirv_cross::SPIRType::BaseType type) {
  switch (type) {
    case spirv_cross::SPIRType::BaseType::Boolean:
      return KnownType{
          .name = "bool",
          .byte_size = sizeof(bool),
      };
    case spirv_cross::SPIRType::BaseType::Float:
      return KnownType{
          .name = "Scalar",
          .byte_size = sizeof(Scalar),
      };
    case spirv_cross::SPIRType::BaseType::Half:
      return KnownType{
          .name = "Half",
          .byte_size = sizeof(Half),
      };
    case spirv_cross::SPIRType::BaseType::UInt:
      return KnownType{
          .name = "uint32_t",
          .byte_size = sizeof(uint32_t),
      };
    case spirv_cross::SPIRType::BaseType::Int:
      return KnownType{
          .name = "int32_t",
          .byte_size = sizeof(int32_t),
      };
    default:
      break;
  }
  return std::nullopt;
}

//------------------------------------------------------------------------------
/// @brief      Get the reflected struct size. In the vast majority of the
///             cases, this is the same as the declared struct size as given by
///             the compiler. But, additional padding may need to be introduced
///             after the end of the struct to keep in line with the alignment
///             requirement of the individual struct members. This method
///             figures out the actual size of the reflected struct that can be
///             referenced in native code.
///
/// @param[in]  members  The members
///
/// @return     The reflected structure size.
///
static size_t GetReflectedStructSize(const std::vector<StructMember>& members) {
  auto struct_size = 0u;
  for (const auto& member : members) {
    struct_size += member.byte_length;
  }
  return struct_size;
}

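// Walks a struct type's members in offset order, materializing explicit
// Padding<N> pseudo-members wherever the SPIR-V offsets leave gaps, so the
// emitted C++ struct is layout-compatible with the GPU-side struct.
// Illustrative example (hypothetical shader struct):
//
//   struct FrameInfo { float time; vec2 size; };   // GLSL
//
// reflects as { Scalar time; Padding<4> _PADDING_size_; Point size; },
// because vec2 is 8-byte aligned in the buffer layout.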
std::vector<StructMember> Reflector::ReadStructMembers(
    const spirv_cross::TypeID& type_id) const {
  const auto& struct_type = compiler_->get_type(type_id);
  FML_CHECK(struct_type.basetype == spirv_cross::SPIRType::BaseType::Struct);

  std::vector<StructMember> result;

  size_t current_byte_offset = 0;
  size_t max_member_alignment = 0;

  for (size_t i = 0; i < struct_type.member_types.size(); i++) {
    const spirv_cross::SPIRType& member =
        compiler_->get_type(struct_type.member_types[i]);
    const uint32_t struct_member_offset =
        compiler_->type_struct_member_offset(struct_type, i);
    std::optional<uint32_t> array_elements = GetArrayElements(member);

    if (struct_member_offset > current_byte_offset) {
      const size_t alignment_pad = struct_member_offset - current_byte_offset;
      result.emplace_back(StructMember{
          /*p_type=*/TypeNameWithPaddingOfSize(alignment_pad),
          /*p_base_type=*/spirv_cross::SPIRType::BaseType::Void,
          /*p_name=*/
          std::format("_PADDING_{}_", GetMemberNameAtIndex(struct_type, i)),
          /*p_offset=*/current_byte_offset,
          /*p_size=*/alignment_pad,
          /*p_byte_length=*/alignment_pad,
          /*p_array_elements=*/std::nullopt,
          /*p_element_padding=*/0,
      });
      current_byte_offset += alignment_pad;
    }

    max_member_alignment =
        std::max<size_t>(max_member_alignment,
                         (member.width / 8) * member.columns * member.vecsize);

    FML_CHECK(current_byte_offset == struct_member_offset);

    // A user defined struct.
    if (member.basetype == spirv_cross::SPIRType::BaseType::Struct) {
      const size_t size =
          GetReflectedStructSize(ReadStructMembers(member.self));
      uint32_t stride = GetArrayStride<0>(struct_type, member, i);
      if (stride == 0) {
        stride = size;
      }
      uint32_t element_padding = stride - size;
      result.emplace_back(StructMember{
          /*p_type=*/compiler_->get_name(member.self),
          /*p_base_type=*/member.basetype,
          /*p_name=*/GetMemberNameAtIndex(struct_type, i),
          /*p_offset=*/struct_member_offset,
          /*p_size=*/size,
          /*p_byte_length=*/stride * array_elements.value_or(1),
          /*p_array_elements=*/array_elements,
          /*p_element_padding=*/element_padding,
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Mat2.
    if (member.basetype == spirv_cross::SPIRType::BaseType::Float &&
        member.width == 32 && member.columns == 2 && member.vecsize == 2) {
      // Mat2s are packed like two vec2s, i.e.
      // {val, val, padding, padding, val, val, padding, padding}.
      uint32_t count = array_elements.value_or(1) * 2;
      uint32_t stride = 16;
      uint32_t total_length = stride * count;

      result.emplace_back(StructMember{
          /*p_type=*/"Mat2",
          /*p_base_type=*/spirv_cross::SPIRType::BaseType::Float,
          /*p_name=*/GetMemberNameAtIndex(struct_type, i),
          /*p_offset=*/struct_member_offset,
          /*p_size=*/sizeof(Point),
          /*p_byte_length=*/total_length,
          /*p_array_elements=*/count,
          /*p_element_padding=*/8,
      });
      current_byte_offset += total_length;
      continue;
    }

    if (member.basetype == spirv_cross::SPIRType::BaseType::Float &&
        member.width == 32 && member.columns == 3 && member.vecsize == 3) {
      // Mat3s are packed as three vec3s with one float of padding after each.
      // {val, val, val, padding, val, val, val, padding, val, val, val,
      // padding}.
      uint32_t count = array_elements.value_or(1) * 3;
      uint32_t stride = 16;
      uint32_t total_length = stride * count;

      result.emplace_back(StructMember{
          /*p_type=*/"Mat3",
          /*p_base_type=*/spirv_cross::SPIRType::BaseType::Float,
          /*p_name=*/GetMemberNameAtIndex(struct_type, i),
          /*p_offset=*/struct_member_offset,
          /*p_size=*/12,
          /*p_byte_length=*/total_length,
          /*p_array_elements=*/count,
          /*p_element_padding=*/4,
      });
      current_byte_offset += total_length;
      continue;
    }

    // Tightly packed 4x4 Matrix is special cased as we know how to work with
    // those.
    if (member.basetype == spirv_cross::SPIRType::BaseType::Float &&  //
        member.width == sizeof(Scalar) * 8 &&                         //
        member.columns == 4 &&                                        //
        member.vecsize == 4                                           //
    ) {
      uint32_t stride = GetArrayStride<sizeof(Matrix)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(Matrix);
      result.emplace_back(StructMember{
          /*p_type=*/"Matrix",
          /*p_base_type=*/member.basetype,
          /*p_name=*/GetMemberNameAtIndex(struct_type, i),
          /*p_offset=*/struct_member_offset,
          /*p_size=*/sizeof(Matrix),
          /*p_byte_length=*/stride * array_elements.value_or(1),
          /*p_array_elements=*/array_elements,
          /*p_element_padding=*/element_padding,
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed UintPoint32 (uvec2).
    if (member.basetype == spirv_cross::SPIRType::BaseType::UInt &&  //
        member.width == sizeof(uint32_t) * 8 &&                      //
        member.columns == 1 &&                                       //
        member.vecsize == 2                                          //
    ) {
      uint32_t stride =
          GetArrayStride<sizeof(UintPoint32)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(UintPoint32);
      result.emplace_back(StructMember{
          /*p_type=*/"UintPoint32",
          /*p_base_type=*/member.basetype,
          /*p_name=*/GetMemberNameAtIndex(struct_type, i),
          /*p_offset=*/struct_member_offset,
          /*p_size=*/sizeof(UintPoint32),
          /*p_byte_length=*/stride * array_elements.value_or(1),
          /*p_array_elements=*/array_elements,
          /*p_element_padding=*/element_padding,
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed IPoint32 (ivec2).
    if (member.basetype == spirv_cross::SPIRType::BaseType::Int &&  //
        member.width == sizeof(int32_t) * 8 &&                      //
        member.columns == 1 &&                                      //
        member.vecsize == 2                                         //
    ) {
      uint32_t stride =
          GetArrayStride<sizeof(IPoint32)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(IPoint32);
      result.emplace_back(StructMember{
          /*p_type=*/"IPoint32",
          /*p_base_type=*/member.basetype,
          /*p_name=*/GetMemberNameAtIndex(struct_type, i),
          /*p_offset=*/struct_member_offset,
          /*p_size=*/sizeof(IPoint32),
          /*p_byte_length=*/stride * array_elements.value_or(1),
          /*p_array_elements=*/array_elements,
          /*p_element_padding=*/element_padding,
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed Point (vec2).
    if (member.basetype == spirv_cross::SPIRType::BaseType::Float &&  //
        member.width == sizeof(float) * 8 &&                          //
        member.columns == 1 &&                                        //
        member.vecsize == 2                                           //
    ) {
      uint32_t stride = GetArrayStride<sizeof(Point)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(Point);
      result.emplace_back(StructMember{
          /*p_type=*/"Point",
          /*p_base_type=*/member.basetype,
          /*p_name=*/GetMemberNameAtIndex(struct_type, i),
          /*p_offset=*/struct_member_offset,
          /*p_size=*/sizeof(Point),
          /*p_byte_length=*/stride * array_elements.value_or(1),
          /*p_array_elements=*/array_elements,
          /*p_element_padding=*/element_padding,
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed Vector3.
    if (member.basetype == spirv_cross::SPIRType::BaseType::Float &&  //
        member.width == sizeof(float) * 8 &&                          //
        member.columns == 1 &&                                        //
        member.vecsize == 3                                           //
    ) {
      uint32_t stride = GetArrayStride<sizeof(Vector3)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(Vector3);
      result.emplace_back(StructMember{
          /*p_type=*/"Vector3",
          /*p_base_type=*/member.basetype,
          /*p_name=*/GetMemberNameAtIndex(struct_type, i),
          /*p_offset=*/struct_member_offset,
          /*p_size=*/sizeof(Vector3),
          /*p_byte_length=*/stride * array_elements.value_or(1),
          /*p_array_elements=*/array_elements,
          /*p_element_padding=*/element_padding,
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed Vector4.
    if (member.basetype == spirv_cross::SPIRType::BaseType::Float &&  //
        member.width == sizeof(float) * 8 &&                          //
        member.columns == 1 &&                                        //
        member.vecsize == 4                                           //
    ) {
      uint32_t stride = GetArrayStride<sizeof(Vector4)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(Vector4);
      result.emplace_back(StructMember{
          /*p_type=*/"Vector4",
          /*p_base_type=*/member.basetype,
          /*p_name=*/GetMemberNameAtIndex(struct_type, i),
          /*p_offset=*/struct_member_offset,
          /*p_size=*/sizeof(Vector4),
          /*p_byte_length=*/stride * array_elements.value_or(1),
          /*p_array_elements=*/array_elements,
          /*p_element_padding=*/element_padding,
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed half Point (vec2).
    if (member.basetype == spirv_cross::SPIRType::BaseType::Half &&  //
        member.width == sizeof(Half) * 8 &&                          //
        member.columns == 1 &&                                       //
        member.vecsize == 2                                          //
    ) {
      uint32_t stride =
          GetArrayStride<sizeof(HalfVector2)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(HalfVector2);
      result.emplace_back(StructMember{
          /*p_type=*/"HalfVector2",
          /*p_base_type=*/member.basetype,
          /*p_name=*/GetMemberNameAtIndex(struct_type, i),
          /*p_offset=*/struct_member_offset,
          /*p_size=*/sizeof(HalfVector2),
          /*p_byte_length=*/stride * array_elements.value_or(1),
          /*p_array_elements=*/array_elements,
          /*p_element_padding=*/element_padding,
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed Half Float Vector3.
    if (member.basetype == spirv_cross::SPIRType::BaseType::Half &&  //
        member.width == sizeof(Half) * 8 &&                          //
        member.columns == 1 &&                                       //
        member.vecsize == 3                                          //
    ) {
      uint32_t stride =
          GetArrayStride<sizeof(HalfVector3)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(HalfVector3);
      result.emplace_back(StructMember{
          /*p_type=*/"HalfVector3",
          /*p_base_type=*/member.basetype,
          /*p_name=*/GetMemberNameAtIndex(struct_type, i),
          /*p_offset=*/struct_member_offset,
          /*p_size=*/sizeof(HalfVector3),
          /*p_byte_length=*/stride * array_elements.value_or(1),
          /*p_array_elements=*/array_elements,
          /*p_element_padding=*/element_padding,
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed Half Float Vector4.
    if (member.basetype == spirv_cross::SPIRType::BaseType::Half &&  //
        member.width == sizeof(Half) * 8 &&                          //
        member.columns == 1 &&                                       //
        member.vecsize == 4                                          //
    ) {
      uint32_t stride =
          GetArrayStride<sizeof(HalfVector4)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(HalfVector4);
      result.emplace_back(StructMember{
          /*p_type=*/"HalfVector4",
          /*p_base_type=*/member.basetype,
          /*p_name=*/GetMemberNameAtIndex(struct_type, i),
          /*p_offset=*/struct_member_offset,
          /*p_size=*/sizeof(HalfVector4),
          /*p_byte_length=*/stride * array_elements.value_or(1),
          /*p_array_elements=*/array_elements,
          /*p_element_padding=*/element_padding,
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Other isolated scalars (like bool, int, float/Scalar, etc..).
    {
      auto maybe_known_type = ReadKnownScalarType(member.basetype);
      if (maybe_known_type.has_value() &&  //
          member.columns == 1 &&           //
          member.vecsize == 1              //
      ) {
        uint32_t stride = GetArrayStride<0>(struct_type, member, i);
        if (stride == 0) {
          stride = maybe_known_type.value().byte_size;
        }
        uint32_t element_padding = stride - maybe_known_type.value().byte_size;
        // Add the type directly.
        result.emplace_back(StructMember{
            /*p_type=*/maybe_known_type.value().name,
            /*p_base_type=*/member.basetype,
            /*p_name=*/GetMemberNameAtIndex(struct_type, i),
            /*p_offset=*/struct_member_offset,
            /*p_size=*/maybe_known_type.value().byte_size,
            /*p_byte_length=*/stride * array_elements.value_or(1),
            /*p_array_elements=*/array_elements,
            /*p_element_padding=*/element_padding,
        });
        current_byte_offset += stride * array_elements.value_or(1);
        continue;
      }
    }

    // Catch all for unknown types. Just add the necessary padding to the
    // struct and move on.
    {
      const size_t size = (member.width * member.columns * member.vecsize) / 8u;
      uint32_t stride = GetArrayStride<0>(struct_type, member, i);
      if (stride == 0) {
        stride = size;
      }
      size_t element_padding = stride - size;
      result.emplace_back(StructMember{
          /*p_type=*/TypeNameWithPaddingOfSize(size),
          /*p_base_type=*/member.basetype,
          /*p_name=*/GetMemberNameAtIndex(struct_type, i),
          /*p_offset=*/struct_member_offset,
          /*p_size=*/size,
          /*p_byte_length=*/stride * array_elements.value_or(1),
          /*p_array_elements=*/array_elements,
          /*p_element_padding=*/element_padding,
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }
  }

  if (max_member_alignment > 0u) {
    const size_t struct_length = current_byte_offset;
    {
      const size_t excess = struct_length % max_member_alignment;
      if (excess != 0) {
        const auto padding = max_member_alignment - excess;
        result.emplace_back(StructMember{
            /*p_type=*/TypeNameWithPaddingOfSize(padding),
            /*p_base_type=*/spirv_cross::SPIRType::BaseType::Void,
            /*p_name=*/"_PADDING_",
            /*p_offset=*/current_byte_offset,
            /*p_size=*/padding,
            /*p_byte_length=*/padding,
            /*p_array_elements=*/std::nullopt,
            /*p_element_padding=*/0,
        });
      }
    }
  }

  return result;
}

std::optional<Reflector::StructDefinition> Reflector::ReflectStructDefinition(
    const spirv_cross::TypeID& type_id) const {
  const auto& type = compiler_->get_type(type_id);
  if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
    return std::nullopt;
  }

  const auto struct_name = compiler_->get_name(type_id);
  if (struct_name.find("_RESERVED_IDENTIFIER_") != std::string::npos) {
    return std::nullopt;
  }

  auto struct_members = ReadStructMembers(type_id);
  auto reflected_struct_size = GetReflectedStructSize(struct_members);

  StructDefinition struc;
  struc.name = struct_name;
  struc.byte_length = reflected_struct_size;
  struc.members = std::move(struct_members);
  return struc;
}

nlohmann::json::object_t Reflector::EmitStructDefinition(
    std::optional<Reflector::StructDefinition> struc) const {
  nlohmann::json::object_t result;
  result["name"] = struc->name;
  result["byte_length"] = struc->byte_length;
  auto& members = result["members"] = nlohmann::json::array_t{};
  for (const auto& struct_member : struc->members) {
    auto& member = members.emplace_back(nlohmann::json::object_t{});
    member["name"] = struct_member.name;
    member["type"] = struct_member.type;
    member["base_type"] =
        StructMember::BaseTypeToString(struct_member.base_type);
    member["offset"] = struct_member.offset;
    member["byte_length"] = struct_member.byte_length;
    if (struct_member.array_elements.has_value()) {
      member["array_elements"] = struct_member.array_elements.value();
    } else {
      member["array_elements"] = "std::nullopt";
    }
    member["element_padding"] = struct_member.element_padding;
  }
  return result;
}

struct VertexType {
  std::string type_name;
  spirv_cross::SPIRType::BaseType base_type;
  std::string variable_name;
  size_t byte_length = 0u;
};

static VertexType VertexTypeFromInputResource(
    const spirv_cross::Compiler& compiler,
    const spirv_cross::Resource* resource) {
  VertexType result;
  result.variable_name = resource->name;
  const auto& type = compiler.get_type(resource->type_id);
  result.base_type = type.basetype;
  const auto total_size = type.columns * type.vecsize * type.width / 8u;
  result.byte_length = total_size;

  if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
      type.columns == 1u && type.vecsize == 2u &&
      type.width == sizeof(float) * 8u) {
    result.type_name = "Point";
  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
             type.columns == 1u && type.vecsize == 4u &&
             type.width == sizeof(float) * 8u) {
    result.type_name = "Vector4";
  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
             type.columns == 1u && type.vecsize == 3u &&
             type.width == sizeof(float) * 8u) {
    result.type_name = "Vector3";
  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
             type.columns == 1u && type.vecsize == 1u &&
             type.width == sizeof(float) * 8u) {
    result.type_name = "Scalar";
  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Int &&
             type.columns == 1u && type.vecsize == 1u &&
             type.width == sizeof(int32_t) * 8u) {
    result.type_name = "int32_t";
  } else {
    // Catch all unknown padding.
    result.type_name = TypeNameWithPaddingOfSize(total_size);
  }

  return result;
}

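// Derives the PerVertexData struct for a vertex shader's stage inputs.
// Illustrative example (hypothetical shader): the inputs
//
//   layout(location = 0) in vec2 position;
//   layout(location = 1) in vec4 color;
//
// produce { Point position; Vector4 color; } with byte_length 24.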
std::optional<Reflector::StructDefinition>
Reflector::ReflectPerVertexStructDefinition(
    const spirv_cross::SmallVector<spirv_cross::Resource>& stage_inputs) const {
  // Avoid emitting a zero sized structure. The code gen templates assume a
  // non-zero size.
  if (stage_inputs.empty()) {
    return std::nullopt;
  }

  // Validate locations are contiguous and there are no duplicates.
  std::set<uint32_t> locations;
  for (const auto& input : stage_inputs) {
    auto location = compiler_->get_decoration(
        input.id, spv::Decoration::DecorationLocation);
    if (locations.count(location) != 0) {
      // Duplicate location. Bail.
      return std::nullopt;
    }
    locations.insert(location);
  }

  for (size_t i = 0; i < locations.size(); i++) {
    if (locations.count(i) != 1) {
      // Locations are not contiguous. This usually happens when a single
      // stage input takes multiple input slots. No reflection information can
      // be generated for such cases anyway. So bail! It is up to the shader
      // author to make sure one stage input maps to a single input slot.
      return std::nullopt;
    }
  }

  auto input_for_location =
      [&](uint32_t queried_location) -> const spirv_cross::Resource* {
    for (const auto& input : stage_inputs) {
      auto location = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationLocation);
      if (location == queried_location) {
        return &input;
      }
    }
    // This really cannot happen with all the validation above.
    FML_UNREACHABLE();
    return nullptr;
  };

  StructDefinition struc;
  struc.name = "PerVertexData";
  struc.byte_length = 0u;
  for (size_t i = 0; i < locations.size(); i++) {
    auto resource = input_for_location(i);
    if (resource == nullptr) {
      return std::nullopt;
    }
    const auto vertex_type =
        VertexTypeFromInputResource(*compiler_.GetCompiler(), resource);

    auto member = StructMember{
        /*p_type=*/vertex_type.type_name,
        /*p_base_type=*/vertex_type.base_type,
        /*p_name=*/vertex_type.variable_name,
        /*p_offset=*/struc.byte_length,
        /*p_size=*/vertex_type.byte_length,
        /*p_byte_length=*/vertex_type.byte_length,
        /*p_array_elements=*/std::nullopt,
        /*p_element_padding=*/0,
    };
    struc.byte_length += vertex_type.byte_length;
    struc.members.emplace_back(std::move(member));
  }
  return struc;
}

std::optional<std::string> Reflector::GetMemberNameAtIndexIfExists(
    const spirv_cross::SPIRType& parent_type,
    size_t index) const {
  if (parent_type.type_alias != 0) {
    return GetMemberNameAtIndexIfExists(
        compiler_->get_type(parent_type.type_alias), index);
  }

  if (auto found = ir_->meta.find(parent_type.self); found != ir_->meta.end()) {
    const auto& members = found->second.members;
    if (index < members.size() && !members[index].alias.empty()) {
      return members[index].alias;
    }
  }
  return std::nullopt;
}

std::string Reflector::GetMemberNameAtIndex(
    const spirv_cross::SPIRType& parent_type,
    size_t index,
    std::string suffix) const {
  if (auto name = GetMemberNameAtIndexIfExists(parent_type, index);
      name.has_value()) {
    return name.value();
  }
  static std::atomic_size_t sUnnamedMembersID;
  std::stringstream stream;
  stream << "unnamed_" << sUnnamedMembersID++ << suffix;
  return stream.str();
}

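// Builds one bind-helper prototype per shader resource for the code gen
// templates. Illustrative example: a uniform buffer named "frame_info"
// ("frame_info" is a hypothetical name) is rendered by the templates as a
// prototype along the lines of
//
//   static bool BindFrameInfo(ResourceBinder& command, BufferView view);
//
// with descriptor type DescriptorType::kUniformBuffer. The exact emitted
// signature is determined by the templates, not by this function.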
std::vector<Reflector::BindPrototype> Reflector::ReflectBindPrototypes(
    const spirv_cross::ShaderResources& resources,
    spv::ExecutionModel execution_model) const {
  std::vector<BindPrototype> prototypes;
  for (const auto& uniform_buffer : resources.uniform_buffers) {
    auto& proto = prototypes.emplace_back(BindPrototype{});
    proto.return_type = "bool";
    proto.name = ToCamelCase(uniform_buffer.name);
    proto.descriptor_type = "DescriptorType::kUniformBuffer";
    {
      std::stringstream stream;
      stream << "Bind uniform buffer for resource named " << uniform_buffer.name
             << ".";
      proto.docstring = stream.str();
    }
    proto.args.push_back(BindPrototypeArgument{
        .type_name = "ResourceBinder&",
        .argument_name = "command",
    });
    proto.args.push_back(BindPrototypeArgument{
        .type_name = "BufferView",
        .argument_name = "view",
    });
  }
  for (const auto& storage_buffer : resources.storage_buffers) {
    auto& proto = prototypes.emplace_back(BindPrototype{});
    proto.return_type = "bool";
    proto.name = ToCamelCase(storage_buffer.name);
    proto.descriptor_type = "DescriptorType::kStorageBuffer";
    {
      std::stringstream stream;
      stream << "Bind storage buffer for resource named " << storage_buffer.name
             << ".";
      proto.docstring = stream.str();
    }
    proto.args.push_back(BindPrototypeArgument{
        .type_name = "ResourceBinder&",
        .argument_name = "command",
    });
    proto.args.push_back(BindPrototypeArgument{
        .type_name = "BufferView",
        .argument_name = "view",
    });
  }
  for (const auto& sampled_image : resources.sampled_images) {
    auto& proto = prototypes.emplace_back(BindPrototype{});
    proto.return_type = "bool";
    proto.name = ToCamelCase(sampled_image.name);
    proto.descriptor_type = "DescriptorType::kSampledImage";
    {
      std::stringstream stream;
      stream << "Bind combined image sampler for resource named "
             << sampled_image.name << ".";
      proto.docstring = stream.str();
    }
    proto.args.push_back(BindPrototypeArgument{
        .type_name = "ResourceBinder&",
        .argument_name = "command",
    });
    proto.args.push_back(BindPrototypeArgument{
        .type_name = "std::shared_ptr<const Texture>",
        .argument_name = "texture",
    });
    proto.args.push_back(BindPrototypeArgument{
        .type_name = "raw_ptr<const Sampler>",
        .argument_name = "sampler",
    });
  }
  for (const auto& separate_image : resources.separate_images) {
    auto& proto = prototypes.emplace_back(BindPrototype{});
    proto.return_type = "bool";
    proto.name = ToCamelCase(separate_image.name);
    proto.descriptor_type = "DescriptorType::kImage";
    {
      std::stringstream stream;
      stream << "Bind separate image for resource named " << separate_image.name
             << ".";
      proto.docstring = stream.str();
    }
    proto.args.push_back(BindPrototypeArgument{
        .type_name = "Command&",
        .argument_name = "command",
    });
    proto.args.push_back(BindPrototypeArgument{
        .type_name = "std::shared_ptr<const Texture>",
        .argument_name = "texture",
    });
  }
  for (const auto& separate_sampler : resources.separate_samplers) {
    auto& proto = prototypes.emplace_back(BindPrototype{});
    proto.return_type = "bool";
    proto.name = ToCamelCase(separate_sampler.name);
    proto.descriptor_type = "DescriptorType::kSampler";
    {
      std::stringstream stream;
      stream << "Bind separate sampler for resource named "
             << separate_sampler.name << ".";
      proto.docstring = stream.str();
    }
    proto.args.push_back(BindPrototypeArgument{
        .type_name = "Command&",
        .argument_name = "command",
    });
    proto.args.push_back(BindPrototypeArgument{
        .type_name = "std::shared_ptr<const Sampler>",
        .argument_name = "sampler",
    });
  }
  return prototypes;
}

nlohmann::json::array_t Reflector::EmitBindPrototypes(
    const spirv_cross::ShaderResources& resources,
    spv::ExecutionModel execution_model) const {
  const auto prototypes = ReflectBindPrototypes(resources, execution_model);
  nlohmann::json::array_t result;
  for (const auto& res : prototypes) {
    auto& item = result.emplace_back(nlohmann::json::object_t{});
    item["return_type"] = res.return_type;
    item["name"] = res.name;
    item["docstring"] = res.docstring;
    item["descriptor_type"] = res.descriptor_type;
    auto& args = item["args"] = nlohmann::json::array_t{};
    for (const auto& arg : res.args) {
      auto& json_arg = args.emplace_back(nlohmann::json::object_t{});
      json_arg["type_name"] = arg.type_name;
      json_arg["argument_name"] = arg.argument_name;
    }
  }
  return result;
}

}  // namespace compiler
}  // namespace impeller