Flutter Engine Uber Docs
Docs for the entire Flutter Engine repo.
 
Loading...
Searching...
No Matches
reflector.cc
Go to the documentation of this file.
1// Copyright 2013 The Flutter Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5// FLUTTER_NOLINT: https://github.com/flutter/flutter/issues/105732
6
8
9#include <atomic>
10#include <format>
11#include <optional>
12#include <set>
13#include <sstream>
14
15#include "flutter/fml/logging.h"
16#include "fml/backtrace.h"
29#include "spirv_common.hpp"
30
31namespace impeller {
32namespace compiler {
33
34static std::string ExecutionModelToString(spv::ExecutionModel model) {
35 switch (model) {
36 case spv::ExecutionModel::ExecutionModelVertex:
37 return "vertex";
38 case spv::ExecutionModel::ExecutionModelFragment:
39 return "fragment";
40 case spv::ExecutionModel::ExecutionModelGLCompute:
41 return "compute";
42 default:
43 return "unsupported";
44 }
45}
46
// Translates a stage name (as produced by ExecutionModelToString) into the
// spelled-out C++ enum literal that the generated code should reference.
// Unrecognized names map to "ShaderStage::kUnknown".
static std::string StringToShaderStage(const std::string& str) {
  // Table of stage-name -> enum-literal pairs.
  static constexpr std::pair<const char*, const char*> kStageMappings[] = {
      {"vertex", "ShaderStage::kVertex"},
      {"fragment", "ShaderStage::kFragment"},
      {"compute", "ShaderStage::kCompute"},
  };
  for (const auto& [stage_name, enum_literal] : kStageMappings) {
    if (str == stage_name) {
      return enum_literal;
    }
  }
  return "ShaderStage::kUnknown";
}
62
// Reflector constructor. NOTE(review): the first line of the signature is
// missing from this listing; the visible parameters are the parsed SPIR-V
// IR, the compiled shader binary, and the backend compiler wrapper. The
// constructor eagerly generates every reflection artifact and only sets
// is_valid_ once all required artifacts were produced.
64 const std::shared_ptr<const spirv_cross::ParsedIR>& ir,
65 const std::shared_ptr<fml::Mapping>& shader_data,
66 const CompilerBackend& compiler)
67 : options_(std::move(options)),
68 ir_(ir),
69 shader_data_(shader_data),
70 compiler_(compiler) {
// Bail out early (leaving is_valid_ false) if required inputs are absent.
71 if (!ir_ || !compiler_) {
72 return;
73 }
74
// Build the JSON arguments consumed by the inja templates; failure aborts
// construction with is_valid_ still false.
75 if (auto template_arguments = GenerateTemplateArguments();
76 template_arguments.has_value()) {
77 template_arguments_ =
78 std::make_unique<nlohmann::json>(std::move(template_arguments.value()));
79 } else {
80 return;
81 }
82
83 reflection_header_ = GenerateReflectionHeader();
84 if (!reflection_header_) {
85 return;
86 }
87
88 reflection_cc_ = GenerateReflectionCC();
89 if (!reflection_cc_) {
90 return;
91 }
92
// NOTE(review): unlike the other artifacts, runtime_stage_shader_ is not
// null-checked — presumably it is optional per target platform; confirm.
93 runtime_stage_shader_ = GenerateRuntimeStageData();
94
95 shader_bundle_data_ = GenerateShaderBundleData();
96 if (!shader_bundle_data_) {
97 return;
98 }
99
100 is_valid_ = true;
101}
102
// Defaulted destructor: all members are RAII-managed (smart pointers).
103Reflector::~Reflector() = default;
104
// Whether construction completed successfully; false if any required
// reflection artifact failed to generate.
105bool Reflector::IsValid() const {
106 return is_valid_;
107}
108
109std::shared_ptr<fml::Mapping> Reflector::GetReflectionJSON() const {
110 if (!is_valid_) {
111 return nullptr;
112 }
113
114 auto json_string =
115 std::make_shared<std::string>(template_arguments_->dump(2u));
116
117 return std::make_shared<fml::NonOwnedMapping>(
118 reinterpret_cast<const uint8_t*>(json_string->data()),
119 json_string->size(), [json_string](auto, auto) {});
120}
121
// Accessor for the rendered reflection header; nullptr if generation failed.
122std::shared_ptr<fml::Mapping> Reflector::GetReflectionHeader() const {
123 return reflection_header_;
124}
125
// Accessor for the rendered reflection translation unit; nullptr if
// generation failed.
126std::shared_ptr<fml::Mapping> Reflector::GetReflectionCC() const {
127 return reflection_cc_;
128}
129
// Accessor for the runtime stage payload; may be nullptr (generation is not
// required for construction to succeed).
130std::shared_ptr<RuntimeStageData::Shader> Reflector::GetRuntimeStageShaderData()
131 const {
132 return runtime_stage_shader_;
133}
134
// Accessor for the Flutter GPU shader bundle payload; nullptr if generation
// failed.
135std::shared_ptr<ShaderBundleData> Reflector::GetShaderBundleData() const {
136 return shader_bundle_data_;
137}
138
// Builds the JSON object consumed by the inja code-generation templates. It
// records entrypoint metadata plus every reflected resource class (subpass
// inputs, uniform/storage buffers, plain GL uniforms, stage inputs/outputs,
// sampled images) and the struct definitions the generated header must
// declare. Returns std::nullopt when the shader is malformed (not exactly
// one entrypoint) or any reflection step fails.
139std::optional<nlohmann::json> Reflector::GenerateTemplateArguments() const {
140 nlohmann::json root;
141
// Exactly one entrypoint is required; its execution model decides the stage.
142 const auto& entrypoints = compiler_->get_entry_points_and_stages();
143 if (entrypoints.size() != 1) {
144 VALIDATION_LOG << "Incorrect number of entrypoints in the shader. Found "
145 << entrypoints.size() << " but expected 1.";
146 return std::nullopt;
147 }
148
149 auto execution_model = entrypoints.front().execution_model;
150 {
151 root["entrypoint"] = options_.entry_point_name;
152 root["shader_name"] = options_.shader_name;
153 root["shader_stage"] = ExecutionModelToString(execution_model);
154 root["header_file_name"] = options_.header_file_name;
155 }
156
157 const auto shader_resources = compiler_->get_shader_resources();
158
159 // Subpass Inputs.
160 {
161 auto& subpass_inputs = root["subpass_inputs"] = nlohmann::json::array_t{};
162 if (auto subpass_inputs_json =
163 ReflectResources(shader_resources.subpass_inputs);
164 subpass_inputs_json.has_value()) {
// Each element is copied by value on purpose: the copy is tagged with its
// descriptor type before being moved into the output array.
165 for (auto subpass_input : subpass_inputs_json.value()) {
166 subpass_input["descriptor_type"] = "DescriptorType::kInputAttachment";
167 subpass_inputs.emplace_back(std::move(subpass_input));
168 }
169 } else {
170 return std::nullopt;
171 }
172 }
173
// Uniform and storage buffers share the "buffers" array, distinguished by
// their descriptor_type tag.
174 // Uniform and storage buffers.
175 {
176 auto& buffers = root["buffers"] = nlohmann::json::array_t{};
177 if (auto uniform_buffers_json =
178 ReflectResources(shader_resources.uniform_buffers);
179 uniform_buffers_json.has_value()) {
180 for (auto uniform_buffer : uniform_buffers_json.value()) {
181 uniform_buffer["descriptor_type"] = "DescriptorType::kUniformBuffer";
182 buffers.emplace_back(std::move(uniform_buffer));
183 }
184 } else {
185 return std::nullopt;
186 }
187 if (auto storage_buffers_json =
188 ReflectResources(shader_resources.storage_buffers);
189 storage_buffers_json.has_value()) {
190 for (auto uniform_buffer : storage_buffers_json.value()) {
191 uniform_buffer["descriptor_type"] = "DescriptorType::kStorageBuffer";
192 buffers.emplace_back(std::move(uniform_buffer));
193 }
194 } else {
195 return std::nullopt;
196 }
197 }
198
// Plain (non-block) GL uniforms.
199 {
200 auto& uniforms = root["uniforms"] = nlohmann::json::array_t{};
201 if (auto uniforms_json =
202 ReflectResources(shader_resources.gl_plain_uniforms);
203 uniforms_json.has_value()) {
204 for (auto uniform : uniforms_json.value()) {
205 uniform["descriptor_type"] = "DescriptorType::kUniform";
206 uniforms.emplace_back(std::move(uniform));
207 }
208 } else {
209 return std::nullopt;
210 }
211 }
212
// Stage inputs: byte offsets are only computed for the vertex stage, where
// the inputs describe an interleaved vertex layout.
213 {
214 auto& stage_inputs = root["stage_inputs"] = nlohmann::json::array_t{};
215 if (auto stage_inputs_json = ReflectResources(
216 shader_resources.stage_inputs,
217 /*compute_offsets=*/execution_model == spv::ExecutionModelVertex);
218 stage_inputs_json.has_value()) {
219 stage_inputs = std::move(stage_inputs_json.value());
220 } else {
221 return std::nullopt;
222 }
223 }
224
// Combined image-samplers, separate images, and separate samplers all land
// in the "sampled_images" array with distinct descriptor_type tags.
225 {
226 auto combined_sampled_images =
227 ReflectResources(shader_resources.sampled_images);
228 auto images = ReflectResources(shader_resources.separate_images);
229 auto samplers = ReflectResources(shader_resources.separate_samplers);
230 if (!combined_sampled_images.has_value() || !images.has_value() ||
231 !samplers.has_value()) {
232 return std::nullopt;
233 }
234 auto& sampled_images = root["sampled_images"] = nlohmann::json::array_t{};
235 for (auto value : combined_sampled_images.value()) {
236 value["descriptor_type"] = "DescriptorType::kSampledImage";
237 sampled_images.emplace_back(std::move(value));
238 }
239 for (auto value : images.value()) {
240 value["descriptor_type"] = "DescriptorType::kImage";
241 sampled_images.emplace_back(std::move(value));
242 }
243 for (auto value : samplers.value()) {
244 value["descriptor_type"] = "DescriptorType::kSampledSampler";
245 sampled_images.emplace_back(std::move(value));
246 }
247 }
248
249 if (auto stage_outputs = ReflectResources(shader_resources.stage_outputs);
250 stage_outputs.has_value()) {
251 root["stage_outputs"] = std::move(stage_outputs.value());
252 } else {
253 return std::nullopt;
254 }
255
// Struct definitions emitted into the generated header: the per-vertex data
// struct (vertex stages only) plus every interface struct found in the IR.
256 {
257 auto& struct_definitions = root["struct_definitions"] =
258 nlohmann::json::array_t{};
259 if (entrypoints.front().execution_model ==
260 spv::ExecutionModel::ExecutionModelVertex &&
261 !shader_resources.stage_inputs.empty()) {
262 if (auto struc =
263 ReflectPerVertexStructDefinition(shader_resources.stage_inputs);
264 struc.has_value()) {
265 struct_definitions.emplace_back(EmitStructDefinition(struc.value()));
266 } else {
267 // If there are stage inputs, it is an error to not generate a per
268 // vertex data struct for a vertex like shader stage.
269 return std::nullopt;
270 }
271 }
272
273 std::set<spirv_cross::ID> known_structs;
274 ir_->for_each_typed_id<spirv_cross::SPIRType>(
275 [&](uint32_t, const spirv_cross::SPIRType& type) {
276 if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
277 return;
278 }
279 // Skip structs that do not have layout offset decorations.
280 // These structs are used internally within the shader and are not
281 // part of the shader's interface.
282 for (size_t i = 0; i < type.member_types.size(); i++) {
283 if (!compiler_->has_member_decoration(type.self, i,
284 spv::DecorationOffset)) {
285 return;
286 }
287 }
288 if (known_structs.find(type.self) != known_structs.end()) {
289 // Iterating over types this way leads to duplicates which may cause
290 // duplicate struct definitions.
291 return;
292 }
293 known_structs.insert(type.self);
294 if (auto struc = ReflectStructDefinition(type.self);
295 struc.has_value()) {
296 struct_definitions.emplace_back(
297 EmitStructDefinition(struc.value()));
298 }
299 });
300 }
301
302 root["bind_prototypes"] =
303 EmitBindPrototypes(shader_resources, execution_model);
304
305 return root;
306}
307
// Renders the reflection header (.h) template against template_arguments_.
308std::shared_ptr<fml::Mapping> Reflector::GenerateReflectionHeader() const {
309 return InflateTemplate(kReflectionHeaderTemplate);
310}
311
// Renders the reflection translation unit (.cc) template against
// template_arguments_.
312std::shared_ptr<fml::Mapping> Reflector::GenerateReflectionCC() const {
313 return InflateTemplate(kReflectionCCTemplate);
314}
315
339
// Builds the runtime-stage (runtime effect) payload: entrypoint metadata,
// the compiled shader binary, each uniform's description (in declaration
// order), the Vulkan UBO struct layout when targeting Vulkan, and vertex
// input descriptions. Returns nullptr when the target platform has no
// runtime stage backend or the shader is malformed.
// NOTE(review): several original lines are missing from this listing (a
// check macro around line 375 and the case labels of the switch below);
// read accordingly.
340std::shared_ptr<RuntimeStageData::Shader> Reflector::GenerateRuntimeStageData()
341 const {
342 auto backend = GetRuntimeStageBackend(options_.target_platform);
343 if (!backend.has_value()) {
344 return nullptr;
345 }
346
347 const auto& entrypoints = compiler_->get_entry_points_and_stages();
348 if (entrypoints.size() != 1u) {
349 VALIDATION_LOG << "Single entrypoint not found.";
350 return nullptr;
351 }
352 auto data = std::make_unique<RuntimeStageData::Shader>();
353 data->entrypoint = options_.entry_point_name;
354 data->stage = entrypoints.front().execution_model;
355 data->shader = shader_data_;
356 data->backend = backend.value();
357
358 // Sort the IR so that the uniforms are in declaration order.
359 std::vector<spirv_cross::ID> uniforms =
360 SortUniforms(ir_.get(), compiler_.GetCompiler());
361 for (auto& sorted_id : uniforms) {
362 auto var = ir_->ids[sorted_id].get<spirv_cross::SPIRVariable>();
363 const auto spir_type = compiler_->get_type(var.basetype);
364 UniformDescription uniform_description;
365 uniform_description.name = compiler_->get_name(var.self);
366 uniform_description.location = compiler_->get_decoration(
367 var.self, spv::Decoration::DecorationLocation);
368 uniform_description.binding =
369 compiler_->get_decoration(var.self, spv::Decoration::DecorationBinding);
370 uniform_description.type = spir_type.basetype;
371 uniform_description.rows = spir_type.vecsize;
372 uniform_description.columns = spir_type.columns;
373 uniform_description.bit_width = spir_type.width;
374 uniform_description.array_elements = GetArrayElements(spir_type);
// NOTE(review): the start of the check asserting this condition (line 375)
// is missing from this listing.
376 spir_type.basetype ==
377 spirv_cross::SPIRType::BaseType::SampledImage)
378 << "Vulkan runtime effect had unexpected uniforms outside of the "
379 "uniform buffer object.";
380 data->uniforms.emplace_back(std::move(uniform_description));
381 }
382
383 const auto ubos = compiler_->get_shader_resources().uniform_buffers;
384 if (data->backend == RuntimeStageBackend::kVulkan && !ubos.empty()) {
// NOTE(review): this condition looks like it may have intended `||` — as
// written, a shader with two UBOs whose first is correctly named passes
// validation. Confirm the intent before changing.
385 if (ubos.size() != 1 && ubos[0].name != RuntimeStage::kVulkanUBOName) {
386 VALIDATION_LOG << "Expected a single UBO resource named "
387 "'"
389 << "' "
390 "for Vulkan runtime stage backend.";
391 return nullptr;
392 }
393
394 const auto& ubo = ubos[0];
395
396 size_t binding =
397 compiler_->get_decoration(ubo.id, spv::Decoration::DecorationBinding);
398 auto members = ReadStructMembers(ubo.type_id);
// Encode the UBO layout as one flag per float-sized slot: 1 where live
// data resides, 0 for padding.
399 std::vector<uint8_t> struct_layout;
400 size_t float_count = 0;
401
402 for (size_t i = 0; i < members.size(); i += 1) {
403 const auto& member = members[i];
404 std::vector<int> bytes;
// NOTE(review): the case labels of this switch (and a later one) are
// missing from this listing; the first branch appears to emit zeros for
// padding-typed members, the second emits ones for float data.
405 switch (member.underlying_type) {
407 size_t padding_count =
408 (member.size + sizeof(float) - 1) / sizeof(float);
409 while (padding_count > 0) {
410 struct_layout.push_back(0);
411 padding_count--;
412 }
413 break;
414 }
416 if (member.array_elements > 1) {
417 // For each array element member, insert 1 layout property per byte
418 // and 0 layout property per byte of padding
419 for (auto i = 0; i < member.array_elements; i++) {
420 for (auto j = 0u; j < member.size / sizeof(float); j++) {
421 struct_layout.push_back(1);
422 }
423 for (auto j = 0u; j < member.element_padding / sizeof(float);
424 j++) {
425 struct_layout.push_back(0);
426 }
427 }
428 } else {
429 size_t member_float_count = member.byte_length / sizeof(float);
430 float_count += member_float_count;
431 while (member_float_count > 0) {
432 struct_layout.push_back(1);
433 member_float_count--;
434 }
435 }
436 break;
437 }
// Any non-float, non-padding member type is unsupported for runtime
// effects.
439 VALIDATION_LOG << "Non-floating-type struct member " << member.name
440 << " is not supported.";
441 return nullptr;
442 }
443 }
444 data->uniforms.emplace_back(UniformDescription{
445 .name = ubo.name,
446 .location = binding,
447 .binding = binding,
448 .type = spirv_cross::SPIRType::Struct,
449 .struct_layout = std::move(struct_layout),
450 .struct_float_count = float_count,
451 });
452 }
453
454 // We only need to worry about storing vertex attributes.
455 if (entrypoints.front().execution_model == spv::ExecutionModelVertex) {
456 const auto inputs = compiler_->get_shader_resources().stage_inputs;
457 auto input_offsets = ComputeOffsets(inputs);
458 for (const auto& input : inputs) {
459 std::optional<size_t> offset = GetOffset(input.id, input_offsets);
460
461 const auto type = compiler_->get_type(input.type_id);
462
463 InputDescription input_description;
464 input_description.name = input.name;
465 input_description.location = compiler_->get_decoration(
466 input.id, spv::Decoration::DecorationLocation);
467 input_description.set = compiler_->get_decoration(
468 input.id, spv::Decoration::DecorationDescriptorSet);
469 input_description.binding = compiler_->get_decoration(
470 input.id, spv::Decoration::DecorationBinding);
471 input_description.type = type.basetype;
472 input_description.bit_width = type.width;
473 input_description.vec_size = type.vecsize;
474 input_description.columns = type.columns;
475 input_description.offset = offset.value_or(0u);
476 data->inputs.emplace_back(std::move(input_description));
477 }
478 }
479
480 return data;
481}
482
// Builds the Flutter GPU shader bundle payload: the shader binary, a struct
// description for every uniform buffer (Flutter GPU requires uniforms to be
// structs), every combined sampled image, and vertex input descriptions.
// Returns nullptr if the shader is malformed.
// NOTE(review): two original lines are missing from this listing (the
// arguments to the GetExtendedMSLResourceBinding calls); read accordingly.
483std::shared_ptr<ShaderBundleData> Reflector::GenerateShaderBundleData() const {
484 const auto& entrypoints = compiler_->get_entry_points_and_stages();
485 if (entrypoints.size() != 1u) {
486 VALIDATION_LOG << "Single entrypoint not found.";
487 return nullptr;
488 }
489 auto data = std::make_shared<ShaderBundleData>(
490 options_.entry_point_name, //
491 entrypoints.front().execution_model, //
492 options_.target_platform //
493 );
494 data->SetShaderData(shader_data_);
495
496 const auto uniforms = compiler_->get_shader_resources().uniform_buffers;
497 for (const auto& uniform : uniforms) {
498 ShaderBundleData::ShaderUniformStruct uniform_struct;
499 uniform_struct.name = uniform.name;
// NOTE(review): the argument list of this call (line 501) is missing from
// this listing.
500 uniform_struct.ext_res_0 = compiler_.GetExtendedMSLResourceBinding(
502 uniform_struct.set = compiler_->get_decoration(
503 uniform.id, spv::Decoration::DecorationDescriptorSet);
504 uniform_struct.binding = compiler_->get_decoration(
505 uniform.id, spv::Decoration::DecorationBinding);
506
// Flutter GPU only supports struct uniforms; reject anything else.
507 const auto type = compiler_->get_type(uniform.type_id);
508 if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
509 std::cerr << "Error: Uniform \"" << uniform.name
510 << "\" is not a struct. All Flutter GPU shader uniforms must "
511 "be structs."
512 << std::endl;
513 return nullptr;
514 }
515
// Total size includes padding members, but padding is not emitted as a
// named field.
516 size_t size_in_bytes = 0;
517 for (const auto& struct_member : ReadStructMembers(uniform.type_id)) {
518 size_in_bytes += struct_member.byte_length;
519 if (StringStartsWith(struct_member.name, "_PADDING_")) {
520 continue;
521 }
522 ShaderBundleData::ShaderUniformStructField uniform_struct_field;
523 uniform_struct_field.name = struct_member.name;
524 uniform_struct_field.type = struct_member.base_type;
525 uniform_struct_field.offset_in_bytes = struct_member.offset;
526 uniform_struct_field.element_size_in_bytes = struct_member.size;
527 uniform_struct_field.total_size_in_bytes = struct_member.byte_length;
528 uniform_struct_field.array_elements = struct_member.array_elements;
529 uniform_struct.fields.push_back(uniform_struct_field);
530 }
531 uniform_struct.size_in_bytes = size_in_bytes;
532
533 data->AddUniformStruct(uniform_struct);
534 }
535
536 const auto sampled_images = compiler_->get_shader_resources().sampled_images;
537 for (const auto& image : sampled_images) {
538 ShaderBundleData::ShaderUniformTexture uniform_texture;
539 uniform_texture.name = image.name;
// NOTE(review): the argument list of this call (line 541) is missing from
// this listing.
540 uniform_texture.ext_res_0 = compiler_.GetExtendedMSLResourceBinding(
542 uniform_texture.set = compiler_->get_decoration(
543 image.id, spv::Decoration::DecorationDescriptorSet);
544 uniform_texture.binding =
545 compiler_->get_decoration(image.id, spv::Decoration::DecorationBinding);
546 data->AddUniformTexture(uniform_texture);
547 }
548
549 // We only need to worry about storing vertex attributes.
550 if (entrypoints.front().execution_model == spv::ExecutionModelVertex) {
551 const auto inputs = compiler_->get_shader_resources().stage_inputs;
552 auto input_offsets = ComputeOffsets(inputs);
553 for (const auto& input : inputs) {
554 std::optional<size_t> offset = GetOffset(input.id, input_offsets);
555
556 const auto type = compiler_->get_type(input.type_id);
557
558 InputDescription input_description;
559 input_description.name = input.name;
560 input_description.location = compiler_->get_decoration(
561 input.id, spv::Decoration::DecorationLocation);
562 input_description.set = compiler_->get_decoration(
563 input.id, spv::Decoration::DecorationDescriptorSet);
564 input_description.binding = compiler_->get_decoration(
565 input.id, spv::Decoration::DecorationBinding);
566 input_description.type = type.basetype;
567 input_description.bit_width = type.width;
568 input_description.vec_size = type.vecsize;
569 input_description.columns = type.columns;
570 input_description.offset = offset.value_or(0u);
571 data->AddInputDescription(std::move(input_description));
572 }
573 }
574
575 return data;
576}
577
578std::optional<uint32_t> Reflector::GetArrayElements(
579 const spirv_cross::SPIRType& type) const {
580 if (type.array.empty()) {
581 return std::nullopt;
582 }
583 FML_CHECK(type.array.size() == 1)
584 << "Multi-dimensional arrays are not supported.";
585 FML_CHECK(type.array_size_literal.front())
586 << "Must use a literal for array sizes.";
587 return type.array.front();
588}
589
// NOTE(review): the enclosing function signature (line 590) and the case
// labels of this switch are missing from this listing. Each visible branch
// maps a compiler backend to a human-readable generator name.
591 switch (type) {
593 return "Metal Shading Language";
595 return "OpenGL Shading Language";
597 return "OpenGL Shading Language (Relaxed Vulkan Semantics)";
599 return "SkSL Shading Language";
600 }
602}
603
604std::shared_ptr<fml::Mapping> Reflector::InflateTemplate(
605 std::string_view tmpl) const {
606 inja::Environment env;
607 env.set_trim_blocks(true);
608 env.set_lstrip_blocks(true);
609
610 env.add_callback("camel_case", 1u, [](inja::Arguments& args) {
611 return ToCamelCase(args.at(0u)->get<std::string>());
612 });
613
614 env.add_callback("to_shader_stage", 1u, [](inja::Arguments& args) {
615 return StringToShaderStage(args.at(0u)->get<std::string>());
616 });
617
618 env.add_callback("get_generator_name", 0u,
619 [type = compiler_.GetType()](inja::Arguments& args) {
620 return ToString(type);
621 });
622
623 auto inflated_template =
624 std::make_shared<std::string>(env.render(tmpl, *template_arguments_));
625
626 return std::make_shared<fml::NonOwnedMapping>(
627 reinterpret_cast<const uint8_t*>(inflated_template->data()),
628 inflated_template->size(), [inflated_template](auto, auto) {});
629}
630
631std::vector<size_t> Reflector::ComputeOffsets(
632 const spirv_cross::SmallVector<spirv_cross::Resource>& resources) const {
633 std::vector<size_t> offsets(resources.size(), 0);
634 if (resources.size() == 0) {
635 return offsets;
636 }
637 for (const auto& resource : resources) {
638 const auto type = compiler_->get_type(resource.type_id);
639 auto location = compiler_->get_decoration(
640 resource.id, spv::Decoration::DecorationLocation);
641 // Malformed shader, will be caught later on.
642 if (location >= resources.size() || location < 0) {
643 location = 0;
644 }
645 offsets[location] = (type.width * type.vecsize) / 8;
646 }
647 for (size_t i = 1; i < resources.size(); i++) {
648 offsets[i] += offsets[i - 1];
649 }
650 for (size_t i = resources.size() - 1; i > 0; i--) {
651 offsets[i] = offsets[i - 1];
652 }
653 offsets[0] = 0;
654
655 return offsets;
656}
657
658std::optional<size_t> Reflector::GetOffset(
659 spirv_cross::ID id,
660 const std::vector<size_t>& offsets) const {
661 uint32_t location =
662 compiler_->get_decoration(id, spv::Decoration::DecorationLocation);
663 if (location >= offsets.size()) {
664 return std::nullopt;
665 }
666 return offsets[location];
667}
668
// Reflects a single shader resource into a JSON object carrying its name,
// all relevant decorations, extended MSL bindings, optional byte offset,
// and fully reflected type. Returns std::nullopt when type reflection
// fails.
// NOTE(review): the argument lists of the two GetExtendedMSLResourceBinding
// calls (lines 686 and 688) are missing from this listing.
669std::optional<nlohmann::json::object_t> Reflector::ReflectResource(
670 const spirv_cross::Resource& resource,
671 std::optional<size_t> offset) const {
672 nlohmann::json::object_t result;
673
674 result["name"] = resource.name;
675 result["descriptor_set"] = compiler_->get_decoration(
676 resource.id, spv::Decoration::DecorationDescriptorSet);
677 result["binding"] = compiler_->get_decoration(
678 resource.id, spv::Decoration::DecorationBinding);
// "set" duplicates "descriptor_set" (same decoration queried twice).
679 result["set"] = compiler_->get_decoration(
680 resource.id, spv::Decoration::DecorationDescriptorSet);
681 result["location"] = compiler_->get_decoration(
682 resource.id, spv::Decoration::DecorationLocation);
683 result["index"] =
684 compiler_->get_decoration(resource.id, spv::Decoration::DecorationIndex);
685 result["ext_res_0"] = compiler_.GetExtendedMSLResourceBinding(
687 result["ext_res_1"] = compiler_.GetExtendedMSLResourceBinding(
689 result["relaxed_precision"] =
690 compiler_->get_decoration(
691 resource.id, spv::Decoration::DecorationRelaxedPrecision) == 1;
// Offsets only exist for vertex stage inputs; everything else records 0.
692 result["offset"] = offset.value_or(0u);
693 auto type = ReflectType(resource.type_id);
694 if (!type.has_value()) {
695 return std::nullopt;
696 }
697 result["type"] = std::move(type.value());
698 return result;
699}
700
701std::optional<nlohmann::json::object_t> Reflector::ReflectType(
702 const spirv_cross::TypeID& type_id) const {
703 nlohmann::json::object_t result;
704
705 const auto type = compiler_->get_type(type_id);
706
707 result["type_name"] = StructMember::BaseTypeToString(type.basetype);
708 result["bit_width"] = type.width;
709 result["vec_size"] = type.vecsize;
710 result["columns"] = type.columns;
711 auto& members = result["members"] = nlohmann::json::array_t{};
712 if (type.basetype == spirv_cross::SPIRType::BaseType::Struct) {
713 for (const auto& struct_member : ReadStructMembers(type_id)) {
714 auto member = nlohmann::json::object_t{};
715 member["name"] = struct_member.name;
716 member["type"] = struct_member.type;
717 member["base_type"] =
718 StructMember::BaseTypeToString(struct_member.base_type);
719 member["offset"] = struct_member.offset;
720 member["size"] = struct_member.size;
721 member["byte_length"] = struct_member.byte_length;
722 if (struct_member.array_elements.has_value()) {
723 member["array_elements"] = struct_member.array_elements.value();
724 } else {
725 member["array_elements"] = "std::nullopt";
726 }
727 members.emplace_back(std::move(member));
728 }
729 }
730
731 return result;
732}
733
734std::optional<nlohmann::json::array_t> Reflector::ReflectResources(
735 const spirv_cross::SmallVector<spirv_cross::Resource>& resources,
736 bool compute_offsets) const {
737 nlohmann::json::array_t result;
738 result.reserve(resources.size());
739 std::vector<size_t> offsets;
740 if (compute_offsets) {
741 offsets = ComputeOffsets(resources);
742 }
743 for (const auto& resource : resources) {
744 std::optional<size_t> maybe_offset = std::nullopt;
745 if (compute_offsets) {
746 maybe_offset = GetOffset(resource.id, offsets);
747 }
748 if (auto reflected = ReflectResource(resource, maybe_offset);
749 reflected.has_value()) {
750 result.emplace_back(std::move(reflected.value()));
751 } else {
752 return std::nullopt;
753 }
754 }
755 return result;
756}
757
// Builds the C++ type name used for synthesized padding members of
// reflected structs, e.g. TypeNameWithPaddingOfSize(4) -> "Padding<4>".
static std::string TypeNameWithPaddingOfSize(size_t size) {
  // Plain concatenation with std::to_string avoids the allocation and
  // locale machinery of std::stringstream for this tiny formatting job.
  return "Padding<" + std::to_string(size) + ">";
}
763
// A scalar base type for which reflection knows a concrete native type
// name and byte size (see ReadKnownScalarType below).
764struct KnownType {
765 std::string name;
766 size_t byte_size = 0;
767};
768
769static std::optional<KnownType> ReadKnownScalarType(
770 spirv_cross::SPIRType::BaseType type) {
771 switch (type) {
772 case spirv_cross::SPIRType::BaseType::Boolean:
773 return KnownType{
774 .name = "bool",
775 .byte_size = sizeof(bool),
776 };
777 case spirv_cross::SPIRType::BaseType::Float:
778 return KnownType{
779 .name = "Scalar",
780 .byte_size = sizeof(Scalar),
781 };
782 case spirv_cross::SPIRType::BaseType::Half:
783 return KnownType{
784 .name = "Half",
785 .byte_size = sizeof(Half),
786 };
787 case spirv_cross::SPIRType::BaseType::UInt:
788 return KnownType{
789 .name = "uint32_t",
790 .byte_size = sizeof(uint32_t),
791 };
792 case spirv_cross::SPIRType::BaseType::Int:
793 return KnownType{
794 .name = "int32_t",
795 .byte_size = sizeof(int32_t),
796 };
797 default:
798 break;
799 }
800 return std::nullopt;
801}
802
803//------------------------------------------------------------------------------
804/// @brief Get the reflected struct size. In the vast majority of the
805/// cases, this is the same as the declared struct size as given by
806/// the compiler. But, additional padding may need to be introduced
807/// after the end of the struct to keep in line with the alignment
808/// requirement of the individual struct members. This method
809/// figures out the actual size of the reflected struct that can be
810/// referenced in native code.
811///
812/// @param[in] members The members
813///
814/// @return The reflected structure size.
815///
816static size_t GetReflectedStructSize(const std::vector<StructMember>& members) {
817 auto struct_size = 0u;
818 for (const auto& member : members) {
819 struct_size += member.byte_length;
820 }
821 return struct_size;
822}
823
824std::vector<StructMember> Reflector::ReadStructMembers(
825 const spirv_cross::TypeID& type_id) const {
826 const auto& struct_type = compiler_->get_type(type_id);
827 FML_CHECK(struct_type.basetype == spirv_cross::SPIRType::BaseType::Struct);
828
829 std::vector<StructMember> result;
830
831 size_t current_byte_offset = 0;
832 size_t max_member_alignment = 0;
833
834 for (size_t i = 0; i < struct_type.member_types.size(); i++) {
835 const auto& member = compiler_->get_type(struct_type.member_types[i]);
836 const auto struct_member_offset =
837 compiler_->type_struct_member_offset(struct_type, i);
838 auto array_elements = GetArrayElements(member);
839
840 if (struct_member_offset > current_byte_offset) {
841 const auto alignment_pad = struct_member_offset - current_byte_offset;
842 result.emplace_back(StructMember{
843 TypeNameWithPaddingOfSize(alignment_pad), // type
844 spirv_cross::SPIRType::BaseType::Void, // basetype
845 std::format("_PADDING_{}_",
846 GetMemberNameAtIndex(struct_type, i)), // name
847 current_byte_offset, // offset
848 alignment_pad, // size
849 alignment_pad, // byte_length
850 std::nullopt, // array_elements
851 0, // element_padding
852 });
853 current_byte_offset += alignment_pad;
854 }
855
856 max_member_alignment =
857 std::max<size_t>(max_member_alignment,
858 (member.width / 8) * member.columns * member.vecsize);
859
860 FML_CHECK(current_byte_offset == struct_member_offset);
861
862 // A user defined struct.
863 if (member.basetype == spirv_cross::SPIRType::BaseType::Struct) {
864 const size_t size =
865 GetReflectedStructSize(ReadStructMembers(member.self));
866 uint32_t stride = GetArrayStride<0>(struct_type, member, i);
867 if (stride == 0) {
868 stride = size;
869 }
870 uint32_t element_padding = stride - size;
871 result.emplace_back(StructMember{
872 compiler_->get_name(member.self), // type
873 member.basetype, // basetype
874 GetMemberNameAtIndex(struct_type, i), // name
875 struct_member_offset, // offset
876 size, // size
877 stride * array_elements.value_or(1), // byte_length
878 array_elements, // array_elements
879 element_padding, // element_padding
880 });
881 current_byte_offset += stride * array_elements.value_or(1);
882 continue;
883 }
884
885 // Tightly packed 4x4 Matrix is special cased as we know how to work with
886 // those.
887 if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
888 member.width == sizeof(Scalar) * 8 && //
889 member.columns == 4 && //
890 member.vecsize == 4 //
891 ) {
892 uint32_t stride = GetArrayStride<sizeof(Matrix)>(struct_type, member, i);
893 uint32_t element_padding = stride - sizeof(Matrix);
894 result.emplace_back(StructMember{
895 "Matrix", // type
896 member.basetype, // basetype
897 GetMemberNameAtIndex(struct_type, i), // name
898 struct_member_offset, // offset
899 sizeof(Matrix), // size
900 stride * array_elements.value_or(1), // byte_length
901 array_elements, // array_elements
902 element_padding, // element_padding
903 });
904 current_byte_offset += stride * array_elements.value_or(1);
905 continue;
906 }
907
908 // Tightly packed UintPoint32 (uvec2)
909 if (member.basetype == spirv_cross::SPIRType::BaseType::UInt && //
910 member.width == sizeof(uint32_t) * 8 && //
911 member.columns == 1 && //
912 member.vecsize == 2 //
913 ) {
914 uint32_t stride =
915 GetArrayStride<sizeof(UintPoint32)>(struct_type, member, i);
916 uint32_t element_padding = stride - sizeof(UintPoint32);
917 result.emplace_back(StructMember{
918 "UintPoint32", // type
919 member.basetype, // basetype
920 GetMemberNameAtIndex(struct_type, i), // name
921 struct_member_offset, // offset
922 sizeof(UintPoint32), // size
923 stride * array_elements.value_or(1), // byte_length
924 array_elements, // array_elements
925 element_padding, // element_padding
926 });
927 current_byte_offset += stride * array_elements.value_or(1);
928 continue;
929 }
930
931 // Tightly packed UintPoint32 (ivec2)
932 if (member.basetype == spirv_cross::SPIRType::BaseType::Int && //
933 member.width == sizeof(int32_t) * 8 && //
934 member.columns == 1 && //
935 member.vecsize == 2 //
936 ) {
937 uint32_t stride =
938 GetArrayStride<sizeof(IPoint32)>(struct_type, member, i);
939 uint32_t element_padding = stride - sizeof(IPoint32);
940 result.emplace_back(StructMember{
941 "IPoint32", // type
942 member.basetype, // basetype
943 GetMemberNameAtIndex(struct_type, i), // name
944 struct_member_offset, // offset
945 sizeof(IPoint32), // size
946 stride * array_elements.value_or(1), // byte_length
947 array_elements, // array_elements
948 element_padding, // element_padding
949 });
950 current_byte_offset += stride * array_elements.value_or(1);
951 continue;
952 }
953
954 // Tightly packed Point (vec2).
955 if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
956 member.width == sizeof(float) * 8 && //
957 member.columns == 1 && //
958 member.vecsize == 2 //
959 ) {
960 uint32_t stride = GetArrayStride<sizeof(Point)>(struct_type, member, i);
961 uint32_t element_padding = stride - sizeof(Point);
962 result.emplace_back(StructMember{
963 "Point", // type
964 member.basetype, // basetype
965 GetMemberNameAtIndex(struct_type, i), // name
966 struct_member_offset, // offset
967 sizeof(Point), // size
968 stride * array_elements.value_or(1), // byte_length
969 array_elements, // array_elements
970 element_padding, // element_padding
971 });
972 current_byte_offset += stride * array_elements.value_or(1);
973 continue;
974 }
975
976 // Tightly packed Vector3.
977 if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
978 member.width == sizeof(float) * 8 && //
979 member.columns == 1 && //
980 member.vecsize == 3 //
981 ) {
982 uint32_t stride = GetArrayStride<sizeof(Vector3)>(struct_type, member, i);
983 uint32_t element_padding = stride - sizeof(Vector3);
984 result.emplace_back(StructMember{
985 "Vector3", // type
986 member.basetype, // basetype
987 GetMemberNameAtIndex(struct_type, i), // name
988 struct_member_offset, // offset
989 sizeof(Vector3), // size
990 stride * array_elements.value_or(1), // byte_length
991 array_elements, // array_elements
992 element_padding, // element_padding
993 });
994 current_byte_offset += stride * array_elements.value_or(1);
995 continue;
996 }
997
998 // Tightly packed Vector4.
999 if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
1000 member.width == sizeof(float) * 8 && //
1001 member.columns == 1 && //
1002 member.vecsize == 4 //
1003 ) {
1004 uint32_t stride = GetArrayStride<sizeof(Vector4)>(struct_type, member, i);
1005 uint32_t element_padding = stride - sizeof(Vector4);
1006 result.emplace_back(StructMember{
1007 "Vector4", // type
1008 member.basetype, // basetype
1009 GetMemberNameAtIndex(struct_type, i), // name
1010 struct_member_offset, // offset
1011 sizeof(Vector4), // size
1012 stride * array_elements.value_or(1), // byte_length
1013 array_elements, // array_elements
1014 element_padding, // element_padding
1015 });
1016 current_byte_offset += stride * array_elements.value_or(1);
1017 continue;
1018 }
1019
1020 // Tightly packed half Point (vec2).
1021 if (member.basetype == spirv_cross::SPIRType::BaseType::Half && //
1022 member.width == sizeof(Half) * 8 && //
1023 member.columns == 1 && //
1024 member.vecsize == 2 //
1025 ) {
1026 uint32_t stride =
1027 GetArrayStride<sizeof(HalfVector2)>(struct_type, member, i);
1028 uint32_t element_padding = stride - sizeof(HalfVector2);
1029 result.emplace_back(StructMember{
1030 "HalfVector2", // type
1031 member.basetype, // basetype
1032 GetMemberNameAtIndex(struct_type, i), // name
1033 struct_member_offset, // offset
1034 sizeof(HalfVector2), // size
1035 stride * array_elements.value_or(1), // byte_length
1036 array_elements, // array_elements
1037 element_padding, // element_padding
1038 });
1039 current_byte_offset += stride * array_elements.value_or(1);
1040 continue;
1041 }
1042
1043 // Tightly packed Half Float Vector3.
1044 if (member.basetype == spirv_cross::SPIRType::BaseType::Half && //
1045 member.width == sizeof(Half) * 8 && //
1046 member.columns == 1 && //
1047 member.vecsize == 3 //
1048 ) {
1049 uint32_t stride =
1050 GetArrayStride<sizeof(HalfVector3)>(struct_type, member, i);
1051 uint32_t element_padding = stride - sizeof(HalfVector3);
1052 result.emplace_back(StructMember{
1053 "HalfVector3", // type
1054 member.basetype, // basetype
1055 GetMemberNameAtIndex(struct_type, i), // name
1056 struct_member_offset, // offset
1057 sizeof(HalfVector3), // size
1058 stride * array_elements.value_or(1), // byte_length
1059 array_elements, // array_elements
1060 element_padding, // element_padding
1061 });
1062 current_byte_offset += stride * array_elements.value_or(1);
1063 continue;
1064 }
1065
1066 // Tightly packed Half Float Vector4.
1067 if (member.basetype == spirv_cross::SPIRType::BaseType::Half && //
1068 member.width == sizeof(Half) * 8 && //
1069 member.columns == 1 && //
1070 member.vecsize == 4 //
1071 ) {
1072 uint32_t stride =
1073 GetArrayStride<sizeof(HalfVector4)>(struct_type, member, i);
1074 uint32_t element_padding = stride - sizeof(HalfVector4);
1075 result.emplace_back(StructMember{
1076 "HalfVector4", // type
1077 member.basetype, // basetype
1078 GetMemberNameAtIndex(struct_type, i), // name
1079 struct_member_offset, // offset
1080 sizeof(HalfVector4), // size
1081 stride * array_elements.value_or(1), // byte_length
1082 array_elements, // array_elements
1083 element_padding, // element_padding
1084 });
1085 current_byte_offset += stride * array_elements.value_or(1);
1086 continue;
1087 }
1088
1089 // Other isolated scalars (like bool, int, float/Scalar, etc..).
1090 {
1091 auto maybe_known_type = ReadKnownScalarType(member.basetype);
1092 if (maybe_known_type.has_value() && //
1093 member.columns == 1 && //
1094 member.vecsize == 1 //
1095 ) {
1096 uint32_t stride = GetArrayStride<0>(struct_type, member, i);
1097 if (stride == 0) {
1098 stride = maybe_known_type.value().byte_size;
1099 }
1100 uint32_t element_padding = stride - maybe_known_type.value().byte_size;
1101 // Add the type directly.
1102 result.emplace_back(StructMember{
1103 maybe_known_type.value().name, // type
1104 member.basetype, // basetype
1105 GetMemberNameAtIndex(struct_type, i), // name
1106 struct_member_offset, // offset
1107 maybe_known_type.value().byte_size, // size
1108 stride * array_elements.value_or(1), // byte_length
1109 array_elements, // array_elements
1110 element_padding, // element_padding
1111 });
1112 current_byte_offset += stride * array_elements.value_or(1);
1113 continue;
1114 }
1115 }
1116
1117 // Catch all for unknown types. Just add the necessary padding to the struct
1118 // and move on.
1119 {
1120 const size_t size = (member.width * member.columns * member.vecsize) / 8u;
1121 uint32_t stride = GetArrayStride<0>(struct_type, member, i);
1122 if (stride == 0) {
1123 stride = size;
1124 }
1125 auto element_padding = stride - size;
1126 result.emplace_back(StructMember{
1127 TypeNameWithPaddingOfSize(size), // type
1128 member.basetype, // basetype
1129 GetMemberNameAtIndex(struct_type, i), // name
1130 struct_member_offset, // offset
1131 size, // size
1132 stride * array_elements.value_or(1), // byte_length
1133 array_elements, // array_elements
1134 element_padding, // element_padding
1135 });
1136 current_byte_offset += stride * array_elements.value_or(1);
1137 continue;
1138 }
1139 }
1140
1141 if (max_member_alignment > 0u) {
1142 const auto struct_length = current_byte_offset;
1143 {
1144 const auto excess = struct_length % max_member_alignment;
1145 if (excess != 0) {
1146 const auto padding = max_member_alignment - excess;
1147 result.emplace_back(StructMember{
1149 spirv_cross::SPIRType::BaseType::Void, // basetype
1150 "_PADDING_", // name
1151 current_byte_offset, // offset
1152 padding, // size
1153 padding, // byte_length
1154 std::nullopt, // array_elements
1155 0, // element_padding
1156 });
1157 }
1158 }
1159 }
1160
1161 return result;
1162}
1163
1164std::optional<Reflector::StructDefinition> Reflector::ReflectStructDefinition(
1165 const spirv_cross::TypeID& type_id) const {
1166 const auto& type = compiler_->get_type(type_id);
1167 if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
1168 return std::nullopt;
1169 }
1170
1171 const auto struct_name = compiler_->get_name(type_id);
1172 if (struct_name.find("_RESERVED_IDENTIFIER_") != std::string::npos) {
1173 return std::nullopt;
1174 }
1175
1176 auto struct_members = ReadStructMembers(type_id);
1177 auto reflected_struct_size = GetReflectedStructSize(struct_members);
1178
1179 StructDefinition struc;
1180 struc.name = struct_name;
1181 struc.byte_length = reflected_struct_size;
1182 struc.members = std::move(struct_members);
1183 return struc;
1184}
1185
1186nlohmann::json::object_t Reflector::EmitStructDefinition(
1187 std::optional<Reflector::StructDefinition> struc) const {
1188 nlohmann::json::object_t result;
1189 result["name"] = struc->name;
1190 result["byte_length"] = struc->byte_length;
1191 auto& members = result["members"] = nlohmann::json::array_t{};
1192 for (const auto& struct_member : struc->members) {
1193 auto& member = members.emplace_back(nlohmann::json::object_t{});
1194 member["name"] = struct_member.name;
1195 member["type"] = struct_member.type;
1196 member["base_type"] =
1197 StructMember::BaseTypeToString(struct_member.base_type);
1198 member["offset"] = struct_member.offset;
1199 member["byte_length"] = struct_member.byte_length;
1200 if (struct_member.array_elements.has_value()) {
1201 member["array_elements"] = struct_member.array_elements.value();
1202 } else {
1203 member["array_elements"] = "std::nullopt";
1204 }
1205 member["element_padding"] = struct_member.element_padding;
1206 }
1207 return result;
1208}
1209
  // Reflected C++ type name for the stage input (e.g. "Point", "Vector4", or
  // a generated padding type for unrecognized formats).
  std::string type_name;
  // The SPIR-V base type of the input resource.
  spirv_cross::SPIRType::BaseType base_type;
  // The shader-declared variable name of the stage input.
  std::string variable_name;
  // Total size of the input in bytes (columns * vecsize * width / 8).
  size_t byte_length = 0u;
};
1216
    const spirv_cross::Compiler& compiler,
    const spirv_cross::Resource* resource) {
  // Maps a vertex stage input to a reflected VertexType by matching the
  // SPIR-V base type, column count, vector size, and bit width against the
  // known Impeller geometry types.
  VertexType result;
  result.variable_name = resource->name;
  const auto& type = compiler.get_type(resource->type_id);
  result.base_type = type.basetype;
  // Total size in bytes; `width` is in bits.
  const auto total_size = type.columns * type.vecsize * type.width / 8u;
  result.byte_length = total_size;

  if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
      type.columns == 1u && type.vecsize == 2u &&
      type.width == sizeof(float) * 8u) {
    result.type_name = "Point";
  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
             type.columns == 1u && type.vecsize == 4u &&
             type.width == sizeof(float) * 8u) {
    result.type_name = "Vector4";
  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
             type.columns == 1u && type.vecsize == 3u &&
             type.width == sizeof(float) * 8u) {
    result.type_name = "Vector3";
  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
             type.columns == 1u && type.vecsize == 1u &&
             type.width == sizeof(float) * 8u) {
    result.type_name = "Scalar";
  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Int &&
             type.columns == 1u && type.vecsize == 1u &&
             type.width == sizeof(int32_t) * 8u) {
    result.type_name = "int32_t";
  } else {
    // Catch all unknown padding.
    result.type_name = TypeNameWithPaddingOfSize(total_size);
  }

  return result;
}
1254
// Reflects the vertex stage inputs into a tightly packed "PerVertexData"
// struct definition, in location order. Returns std::nullopt when the inputs
// are empty, have duplicate locations, or are not contiguous.
std::optional<Reflector::StructDefinition>
Reflector::ReflectPerVertexStructDefinition(
    const spirv_cross::SmallVector<spirv_cross::Resource>& stage_inputs) const {
  // Avoid emitting a zero sized structure. The code gen templates assume a
  // non-zero size.
  if (stage_inputs.empty()) {
    return std::nullopt;
  }

  // Validate locations are contiguous and there are no duplicates.
  std::set<uint32_t> locations;
  for (const auto& input : stage_inputs) {
    auto location = compiler_->get_decoration(
        input.id, spv::Decoration::DecorationLocation);
    if (locations.count(location) != 0) {
      // Duplicate location. Bail.
      return std::nullopt;
    }
    locations.insert(location);
  }

  // Since there are no duplicates, N contiguous locations starting at zero
  // must be exactly {0, 1, ..., N-1}.
  for (size_t i = 0; i < locations.size(); i++) {
    if (locations.count(i) != 1) {
      // Locations are not contiguous. This usually happens when a single stage
      // input takes multiple input slots. No reflection information can be
      // generated for such cases anyway. So bail! It is up to the shader author
      // to make sure one stage input maps to a single input slot.
      return std::nullopt;
    }
  }

  // Resolves the stage input decorated with the queried location, if any.
  auto input_for_location =
      [&](uint32_t queried_location) -> const spirv_cross::Resource* {
    for (const auto& input : stage_inputs) {
      auto location = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationLocation);
      if (location == queried_location) {
        return &input;
      }
    }
    // This really cannot happen with all the validation above.
    return nullptr;
  };

  // Lay the inputs out back-to-back in location order with no padding; the
  // running byte_length doubles as the offset of the next member.
  StructDefinition struc;
  struc.name = "PerVertexData";
  struc.byte_length = 0u;
  for (size_t i = 0; i < locations.size(); i++) {
    auto resource = input_for_location(i);
    if (resource == nullptr) {
      return std::nullopt;
    }
    const auto vertex_type =
        VertexTypeFromInputResource(*compiler_.GetCompiler(), resource);

    auto member = StructMember{
        vertex_type.type_name,      // type
        vertex_type.base_type,      // base type
        vertex_type.variable_name,  // name
        struc.byte_length,          // offset
        vertex_type.byte_length,    // size
        vertex_type.byte_length,    // byte_length
        std::nullopt,               // array_elements
        0,                          // element_padding
    };
    struc.byte_length += vertex_type.byte_length;
    struc.members.emplace_back(std::move(member));
  }
  return struc;
}
1326
1327std::optional<std::string> Reflector::GetMemberNameAtIndexIfExists(
1328 const spirv_cross::SPIRType& parent_type,
1329 size_t index) const {
1330 if (parent_type.type_alias != 0) {
1331 return GetMemberNameAtIndexIfExists(
1332 compiler_->get_type(parent_type.type_alias), index);
1333 }
1334
1335 if (auto found = ir_->meta.find(parent_type.self); found != ir_->meta.end()) {
1336 const auto& members = found->second.members;
1337 if (index < members.size() && !members[index].alias.empty()) {
1338 return members[index].alias;
1339 }
1340 }
1341 return std::nullopt;
1342}
1343
1344std::string Reflector::GetMemberNameAtIndex(
1345 const spirv_cross::SPIRType& parent_type,
1346 size_t index,
1347 std::string suffix) const {
1348 if (auto name = GetMemberNameAtIndexIfExists(parent_type, index);
1349 name.has_value()) {
1350 return name.value();
1351 }
1352 static std::atomic_size_t sUnnamedMembersID;
1353 std::stringstream stream;
1354 stream << "unnamed_" << sUnnamedMembersID++ << suffix;
1355 return stream.str();
1356}
1357
1358std::vector<Reflector::BindPrototype> Reflector::ReflectBindPrototypes(
1359 const spirv_cross::ShaderResources& resources,
1360 spv::ExecutionModel execution_model) const {
1361 std::vector<BindPrototype> prototypes;
1362 for (const auto& uniform_buffer : resources.uniform_buffers) {
1363 auto& proto = prototypes.emplace_back(BindPrototype{});
1364 proto.return_type = "bool";
1365 proto.name = ToCamelCase(uniform_buffer.name);
1366 proto.descriptor_type = "DescriptorType::kUniformBuffer";
1367 {
1368 std::stringstream stream;
1369 stream << "Bind uniform buffer for resource named " << uniform_buffer.name
1370 << ".";
1371 proto.docstring = stream.str();
1372 }
1373 proto.args.push_back(BindPrototypeArgument{
1374 .type_name = "ResourceBinder&",
1375 .argument_name = "command",
1376 });
1377 proto.args.push_back(BindPrototypeArgument{
1378 .type_name = "BufferView",
1379 .argument_name = "view",
1380 });
1381 }
1382 for (const auto& storage_buffer : resources.storage_buffers) {
1383 auto& proto = prototypes.emplace_back(BindPrototype{});
1384 proto.return_type = "bool";
1385 proto.name = ToCamelCase(storage_buffer.name);
1386 proto.descriptor_type = "DescriptorType::kStorageBuffer";
1387 {
1388 std::stringstream stream;
1389 stream << "Bind storage buffer for resource named " << storage_buffer.name
1390 << ".";
1391 proto.docstring = stream.str();
1392 }
1393 proto.args.push_back(BindPrototypeArgument{
1394 .type_name = "ResourceBinder&",
1395 .argument_name = "command",
1396 });
1397 proto.args.push_back(BindPrototypeArgument{
1398 .type_name = "BufferView",
1399 .argument_name = "view",
1400 });
1401 }
1402 for (const auto& sampled_image : resources.sampled_images) {
1403 auto& proto = prototypes.emplace_back(BindPrototype{});
1404 proto.return_type = "bool";
1405 proto.name = ToCamelCase(sampled_image.name);
1406 proto.descriptor_type = "DescriptorType::kSampledImage";
1407 {
1408 std::stringstream stream;
1409 stream << "Bind combined image sampler for resource named "
1410 << sampled_image.name << ".";
1411 proto.docstring = stream.str();
1412 }
1413 proto.args.push_back(BindPrototypeArgument{
1414 .type_name = "ResourceBinder&",
1415 .argument_name = "command",
1416 });
1417 proto.args.push_back(BindPrototypeArgument{
1418 .type_name = "std::shared_ptr<const Texture>",
1419 .argument_name = "texture",
1420 });
1421 proto.args.push_back(BindPrototypeArgument{
1422 .type_name = "raw_ptr<const Sampler>",
1423 .argument_name = "sampler",
1424 });
1425 }
1426 for (const auto& separate_image : resources.separate_images) {
1427 auto& proto = prototypes.emplace_back(BindPrototype{});
1428 proto.return_type = "bool";
1429 proto.name = ToCamelCase(separate_image.name);
1430 proto.descriptor_type = "DescriptorType::kImage";
1431 {
1432 std::stringstream stream;
1433 stream << "Bind separate image for resource named " << separate_image.name
1434 << ".";
1435 proto.docstring = stream.str();
1436 }
1437 proto.args.push_back(BindPrototypeArgument{
1438 .type_name = "Command&",
1439 .argument_name = "command",
1440 });
1441 proto.args.push_back(BindPrototypeArgument{
1442 .type_name = "std::shared_ptr<const Texture>",
1443 .argument_name = "texture",
1444 });
1445 }
1446 for (const auto& separate_sampler : resources.separate_samplers) {
1447 auto& proto = prototypes.emplace_back(BindPrototype{});
1448 proto.return_type = "bool";
1449 proto.name = ToCamelCase(separate_sampler.name);
1450 proto.descriptor_type = "DescriptorType::kSampler";
1451 {
1452 std::stringstream stream;
1453 stream << "Bind separate sampler for resource named "
1454 << separate_sampler.name << ".";
1455 proto.docstring = stream.str();
1456 }
1457 proto.args.push_back(BindPrototypeArgument{
1458 .type_name = "Command&",
1459 .argument_name = "command",
1460 });
1461 proto.args.push_back(BindPrototypeArgument{
1462 .type_name = "std::shared_ptr<const Sampler>",
1463 .argument_name = "sampler",
1464 });
1465 }
1466 return prototypes;
1467}
1468
1469nlohmann::json::array_t Reflector::EmitBindPrototypes(
1470 const spirv_cross::ShaderResources& resources,
1471 spv::ExecutionModel execution_model) const {
1472 const auto prototypes = ReflectBindPrototypes(resources, execution_model);
1473 nlohmann::json::array_t result;
1474 for (const auto& res : prototypes) {
1475 auto& item = result.emplace_back(nlohmann::json::object_t{});
1476 item["return_type"] = res.return_type;
1477 item["name"] = res.name;
1478 item["docstring"] = res.docstring;
1479 item["descriptor_type"] = res.descriptor_type;
1480 auto& args = item["args"] = nlohmann::json::array_t{};
1481 for (const auto& arg : res.args) {
1482 auto& json_arg = args.emplace_back(nlohmann::json::object_t{});
1483 json_arg["type_name"] = arg.type_name;
1484 json_arg["argument_name"] = arg.argument_name;
1485 }
1486 }
1487 return result;
1488}
1489
1490} // namespace compiler
1491} // namespace impeller
GLenum type
static const char * kVulkanUBOName
Reflector(Options options, const std::shared_ptr< const spirv_cross::ParsedIR > &ir, const std::shared_ptr< fml::Mapping > &shader_data, const CompilerBackend &compiler)
Definition reflector.cc:63
std::shared_ptr< fml::Mapping > GetReflectionJSON() const
Definition reflector.cc:109
std::shared_ptr< fml::Mapping > GetReflectionCC() const
Definition reflector.cc:126
std::shared_ptr< RuntimeStageData::Shader > GetRuntimeStageShaderData() const
Definition reflector.cc:130
std::shared_ptr< ShaderBundleData > GetShaderBundleData() const
Definition reflector.cc:135
std::shared_ptr< fml::Mapping > GetReflectionHeader() const
Definition reflector.cc:122
static int input(yyscan_t yyscanner)
uint32_t location
int32_t value
FlutterVulkanImage * image
G_BEGIN_DECLS G_MODULE_EXPORT FlValue * args
#define FML_CHECK(condition)
Definition logging.h:104
#define FML_UNREACHABLE()
Definition logging.h:128
const char * name
Definition fuchsia.cc:49
Vector2 padding
The halo padding in source space.
std::array< MockImage, 3 > images
it will be possible to load the file into Perfetto s trace viewer use test Running tests that layout and measure text will not yield consistent results across various platforms Enabling this option will make font resolution default to the Ahem test font on all disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive keep the shell running after the Dart script has completed enable serial On low power devices with low core running concurrent GC tasks on threads can cause them to contend with the UI thread which could potentially lead to jank This option turns off all concurrent GC activities domain network JSON encoded network policy per domain This overrides the DisallowInsecureConnections switch Embedder can specify whether to allow or disallow insecure connections at a domain level old gen heap size
static std::string TypeNameWithPaddingOfSize(size_t size)
Definition reflector.cc:758
static VertexType VertexTypeFromInputResource(const spirv_cross::Compiler &compiler, const spirv_cross::Resource *resource)
static std::string ToString(CompilerBackend::Type type)
Definition reflector.cc:590
static std::optional< RuntimeStageBackend > GetRuntimeStageBackend(TargetPlatform target_platform)
Definition reflector.cc:316
static size_t GetReflectedStructSize(const std::vector< StructMember > &members)
Get the reflected struct size. In the vast majority of the cases, this is the same as the declared st...
Definition reflector.cc:816
static std::string ExecutionModelToString(spv::ExecutionModel model)
Definition reflector.cc:34
static std::string StringToShaderStage(const std::string &str)
Definition reflector.cc:47
constexpr std::string_view kReflectionHeaderTemplate
std::string ToCamelCase(std::string_view string)
Definition utilities.cc:38
constexpr std::string_view kReflectionCCTemplate
static std::optional< KnownType > ReadKnownScalarType(spirv_cross::SPIRType::BaseType type)
Definition reflector.cc:769
bool StringStartsWith(const std::string &target, const std::string &prefix)
Definition utilities.cc:86
std::vector< spirv_cross::ID > SortUniforms(const spirv_cross::ParsedIR *ir, const spirv_cross::Compiler *compiler, std::optional< spirv_cross::SPIRType::BaseType > type_filter, bool include)
Sorts uniform declarations in an IR according to decoration order.
float Scalar
Definition scalar.h:19
TPoint< Scalar > Point
Definition point.h:425
TPoint< int32_t > IPoint32
Definition point.h:427
TPoint< uint32_t > UintPoint32
Definition point.h:428
Definition ref_ptr.h:261
A storage only class for half precision floating point.
Definition half.h:41
spirv_cross::Compiler * GetCompiler()
uint32_t GetExtendedMSLResourceBinding(ExtendedResourceIndex index, spirv_cross::ID id) const
static std::string BaseTypeToString(spirv_cross::SPIRType::BaseType type)
Definition reflector.h:44
spirv_cross::SPIRType::BaseType base_type
std::shared_ptr< const fml::Mapping > data
#define VALIDATION_LOG
Definition validation.h:91