Flutter Engine
 
Loading...
Searching...
No Matches
reflector.cc
Go to the documentation of this file.
1// Copyright 2013 The Flutter Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5// FLUTTER_NOLINT: https://github.com/flutter/flutter/issues/105732
6
8
#include <atomic>
#include <format>
#include <optional>
#include <set>
#include <sstream>
#include <string>

#include "flutter/fml/logging.h"
#include "fml/backtrace.h"
#include "spirv_common.hpp"
30
31namespace impeller {
32namespace compiler {
33
34static std::string ExecutionModelToString(spv::ExecutionModel model) {
35 switch (model) {
36 case spv::ExecutionModel::ExecutionModelVertex:
37 return "vertex";
38 case spv::ExecutionModel::ExecutionModelFragment:
39 return "fragment";
40 case spv::ExecutionModel::ExecutionModelGLCompute:
41 return "compute";
42 default:
43 return "unsupported";
44 }
45}
46
// Translates a stage name (as produced by ExecutionModelToString) into the
// spelling of the corresponding ShaderStage enumerator for generated code.
// Unrecognized names map to ShaderStage::kUnknown.
static std::string StringToShaderStage(const std::string& str) {
  if (str == "compute") {
    return "ShaderStage::kCompute";
  } else if (str == "fragment") {
    return "ShaderStage::kFragment";
  } else if (str == "vertex") {
    return "ShaderStage::kVertex";
  }
  return "ShaderStage::kUnknown";
}
62
64 const std::shared_ptr<const spirv_cross::ParsedIR>& ir,
65 const std::shared_ptr<fml::Mapping>& shader_data,
66 const CompilerBackend& compiler)
67 : options_(std::move(options)),
68 ir_(ir),
69 shader_data_(shader_data),
70 compiler_(compiler) {
71 if (!ir_ || !compiler_) {
72 return;
73 }
74
75 if (auto template_arguments = GenerateTemplateArguments();
76 template_arguments.has_value()) {
77 template_arguments_ =
78 std::make_unique<nlohmann::json>(std::move(template_arguments.value()));
79 } else {
80 return;
81 }
82
83 reflection_header_ = GenerateReflectionHeader();
84 if (!reflection_header_) {
85 return;
86 }
87
88 reflection_cc_ = GenerateReflectionCC();
89 if (!reflection_cc_) {
90 return;
91 }
92
93 runtime_stage_shader_ = GenerateRuntimeStageData();
94
95 shader_bundle_data_ = GenerateShaderBundleData();
96 if (!shader_bundle_data_) {
97 return;
98 }
99
100 is_valid_ = true;
101}
102
// Out-of-line default destructor: member types (e.g. the nlohmann::json held
// by unique_ptr) only need to be complete in this translation unit.
Reflector::~Reflector() = default;
104
// True only if the constructor completed every reflection step successfully.
bool Reflector::IsValid() const {
  return is_valid_;
}
108
109std::shared_ptr<fml::Mapping> Reflector::GetReflectionJSON() const {
110 if (!is_valid_) {
111 return nullptr;
112 }
113
114 auto json_string =
115 std::make_shared<std::string>(template_arguments_->dump(2u));
116
117 return std::make_shared<fml::NonOwnedMapping>(
118 reinterpret_cast<const uint8_t*>(json_string->data()),
119 json_string->size(), [json_string](auto, auto) {});
120}
121
// Returns the rendered reflection header, or nullptr if generation failed.
std::shared_ptr<fml::Mapping> Reflector::GetReflectionHeader() const {
  return reflection_header_;
}
125
// Returns the rendered reflection translation unit, or nullptr on failure.
std::shared_ptr<fml::Mapping> Reflector::GetReflectionCC() const {
  return reflection_cc_;
}
129
// Returns the runtime stage payload; may be null even for a valid reflector
// (the constructor does not require it).
std::shared_ptr<RuntimeStageData::Shader> Reflector::GetRuntimeStageShaderData()
    const {
  return runtime_stage_shader_;
}
134
// Returns the Flutter GPU shader bundle description, or nullptr on failure.
std::shared_ptr<ShaderBundleData> Reflector::GetShaderBundleData() const {
  return shader_bundle_data_;
}
138
// Builds the JSON object fed to the inja templates that render the
// reflection header and CC. Returns std::nullopt if the shader does not have
// exactly one entrypoint or if reflecting any resource group fails.
std::optional<nlohmann::json> Reflector::GenerateTemplateArguments() const {
  nlohmann::json root;

  const auto& entrypoints = compiler_->get_entry_points_and_stages();
  if (entrypoints.size() != 1) {
    VALIDATION_LOG << "Incorrect number of entrypoints in the shader. Found "
                   << entrypoints.size() << " but expected 1.";
    return std::nullopt;
  }

  auto execution_model = entrypoints.front().execution_model;
  {
    root["entrypoint"] = options_.entry_point_name;
    root["shader_name"] = options_.shader_name;
    root["shader_stage"] = ExecutionModelToString(execution_model);
    root["header_file_name"] = options_.header_file_name;
  }

  const auto shader_resources = compiler_->get_shader_resources();

  // Subpass Inputs.
  {
    auto& subpass_inputs = root["subpass_inputs"] = nlohmann::json::array_t{};
    if (auto subpass_inputs_json =
            ReflectResources(shader_resources.subpass_inputs);
        subpass_inputs_json.has_value()) {
      // Iterating by value on purpose: each entry is copied, tagged with its
      // descriptor type, then moved into the output array.
      for (auto subpass_input : subpass_inputs_json.value()) {
        subpass_input["descriptor_type"] = "DescriptorType::kInputAttachment";
        subpass_inputs.emplace_back(std::move(subpass_input));
      }
    } else {
      return std::nullopt;
    }
  }

  // Uniform and storage buffers.
  {
    auto& buffers = root["buffers"] = nlohmann::json::array_t{};
    if (auto uniform_buffers_json =
            ReflectResources(shader_resources.uniform_buffers);
        uniform_buffers_json.has_value()) {
      for (auto uniform_buffer : uniform_buffers_json.value()) {
        uniform_buffer["descriptor_type"] = "DescriptorType::kUniformBuffer";
        buffers.emplace_back(std::move(uniform_buffer));
      }
    } else {
      return std::nullopt;
    }
    if (auto storage_buffers_json =
            ReflectResources(shader_resources.storage_buffers);
        storage_buffers_json.has_value()) {
      for (auto uniform_buffer : storage_buffers_json.value()) {
        uniform_buffer["descriptor_type"] = "DescriptorType::kStorageBuffer";
        buffers.emplace_back(std::move(uniform_buffer));
      }
    } else {
      return std::nullopt;
    }
  }

  // Stage inputs. Byte offsets are only computed for vertex-like stages.
  {
    auto& stage_inputs = root["stage_inputs"] = nlohmann::json::array_t{};
    if (auto stage_inputs_json = ReflectResources(
            shader_resources.stage_inputs,
            /*compute_offsets=*/execution_model == spv::ExecutionModelVertex);
        stage_inputs_json.has_value()) {
      stage_inputs = std::move(stage_inputs_json.value());
    } else {
      return std::nullopt;
    }
  }

  // Combined image-samplers, separate images, and separate samplers all land
  // in the single "sampled_images" array, distinguished by descriptor type.
  {
    auto combined_sampled_images =
        ReflectResources(shader_resources.sampled_images);
    auto images = ReflectResources(shader_resources.separate_images);
    auto samplers = ReflectResources(shader_resources.separate_samplers);
    if (!combined_sampled_images.has_value() || !images.has_value() ||
        !samplers.has_value()) {
      return std::nullopt;
    }
    auto& sampled_images = root["sampled_images"] = nlohmann::json::array_t{};
    for (auto value : combined_sampled_images.value()) {
      value["descriptor_type"] = "DescriptorType::kSampledImage";
      sampled_images.emplace_back(std::move(value));
    }
    for (auto value : images.value()) {
      value["descriptor_type"] = "DescriptorType::kImage";
      sampled_images.emplace_back(std::move(value));
    }
    for (auto value : samplers.value()) {
      value["descriptor_type"] = "DescriptorType::kSampledSampler";
      sampled_images.emplace_back(std::move(value));
    }
  }

  if (auto stage_outputs = ReflectResources(shader_resources.stage_outputs);
      stage_outputs.has_value()) {
    root["stage_outputs"] = std::move(stage_outputs.value());
  } else {
    return std::nullopt;
  }

  // Struct definitions: the per-vertex struct (vertex stages only) plus every
  // interface struct discovered in the IR.
  {
    auto& struct_definitions = root["struct_definitions"] =
        nlohmann::json::array_t{};
    if (entrypoints.front().execution_model ==
            spv::ExecutionModel::ExecutionModelVertex &&
        !shader_resources.stage_inputs.empty()) {
      if (auto struc =
              ReflectPerVertexStructDefinition(shader_resources.stage_inputs);
          struc.has_value()) {
        struct_definitions.emplace_back(EmitStructDefinition(struc.value()));
      } else {
        // If there are stage inputs, it is an error to not generate a per
        // vertex data struct for a vertex like shader stage.
        return std::nullopt;
      }
    }

    std::set<spirv_cross::ID> known_structs;
    ir_->for_each_typed_id<spirv_cross::SPIRType>(
        [&](uint32_t, const spirv_cross::SPIRType& type) {
          if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
            return;
          }
          // Skip structs that do not have layout offset decorations.
          // These structs are used internally within the shader and are not
          // part of the shader's interface.
          for (size_t i = 0; i < type.member_types.size(); i++) {
            if (!compiler_->has_member_decoration(type.self, i,
                                                 spv::DecorationOffset)) {
              return;
            }
          }
          if (known_structs.find(type.self) != known_structs.end()) {
            // Iterating over types this way leads to duplicates which may cause
            // duplicate struct definitions.
            return;
          }
          known_structs.insert(type.self);
          if (auto struc = ReflectStructDefinition(type.self);
              struc.has_value()) {
            struct_definitions.emplace_back(
                EmitStructDefinition(struc.value()));
          }
        });
  }

  root["bind_prototypes"] =
      EmitBindPrototypes(shader_resources, execution_model);

  return root;
}
293
// Renders the reflection header from its inja template.
std::shared_ptr<fml::Mapping> Reflector::GenerateReflectionHeader() const {
  return InflateTemplate(kReflectionHeaderTemplate);
}
297
// Renders the reflection translation unit from its inja template.
std::shared_ptr<fml::Mapping> Reflector::GenerateReflectionCC() const {
  return InflateTemplate(kReflectionCCTemplate);
}
301
325
326std::shared_ptr<RuntimeStageData::Shader> Reflector::GenerateRuntimeStageData()
327 const {
328 auto backend = GetRuntimeStageBackend(options_.target_platform);
329 if (!backend.has_value()) {
330 return nullptr;
331 }
332
333 const auto& entrypoints = compiler_->get_entry_points_and_stages();
334 if (entrypoints.size() != 1u) {
335 VALIDATION_LOG << "Single entrypoint not found.";
336 return nullptr;
337 }
338 auto data = std::make_unique<RuntimeStageData::Shader>();
339 data->entrypoint = options_.entry_point_name;
340 data->stage = entrypoints.front().execution_model;
341 data->shader = shader_data_;
342 data->backend = backend.value();
343
344 // Sort the IR so that the uniforms are in declaration order.
345 std::vector<spirv_cross::ID> uniforms =
346 SortUniforms(ir_.get(), compiler_.GetCompiler());
347 for (auto& sorted_id : uniforms) {
348 auto var = ir_->ids[sorted_id].get<spirv_cross::SPIRVariable>();
349 const auto spir_type = compiler_->get_type(var.basetype);
350 UniformDescription uniform_description;
351 uniform_description.name = compiler_->get_name(var.self);
352 uniform_description.location = compiler_->get_decoration(
353 var.self, spv::Decoration::DecorationLocation);
354 uniform_description.binding =
355 compiler_->get_decoration(var.self, spv::Decoration::DecorationBinding);
356 uniform_description.type = spir_type.basetype;
357 uniform_description.rows = spir_type.vecsize;
358 uniform_description.columns = spir_type.columns;
359 uniform_description.bit_width = spir_type.width;
360 uniform_description.array_elements = GetArrayElements(spir_type);
362 spir_type.basetype ==
363 spirv_cross::SPIRType::BaseType::SampledImage)
364 << "Vulkan runtime effect had unexpected uniforms outside of the "
365 "uniform buffer object.";
366 data->uniforms.emplace_back(std::move(uniform_description));
367 }
368
369 const auto ubos = compiler_->get_shader_resources().uniform_buffers;
370 if (data->backend == RuntimeStageBackend::kVulkan && !ubos.empty()) {
371 if (ubos.size() != 1 && ubos[0].name != RuntimeStage::kVulkanUBOName) {
372 VALIDATION_LOG << "Expected a single UBO resource named "
373 "'"
375 << "' "
376 "for Vulkan runtime stage backend.";
377 return nullptr;
378 }
379
380 const auto& ubo = ubos[0];
381
382 size_t binding =
383 compiler_->get_decoration(ubo.id, spv::Decoration::DecorationBinding);
384 auto members = ReadStructMembers(ubo.type_id);
385 std::vector<uint8_t> struct_layout;
386 size_t float_count = 0;
387
388 for (size_t i = 0; i < members.size(); i += 1) {
389 const auto& member = members[i];
390 std::vector<int> bytes;
391 switch (member.underlying_type) {
393 size_t padding_count =
394 (member.size + sizeof(float) - 1) / sizeof(float);
395 while (padding_count > 0) {
396 struct_layout.push_back(0);
397 padding_count--;
398 }
399 break;
400 }
402 if (member.array_elements > 1) {
403 // For each array element member, insert 1 layout property per byte
404 // and 0 layout property per byte of padding
405 for (auto i = 0; i < member.array_elements; i++) {
406 for (auto j = 0u; j < member.size / sizeof(float); j++) {
407 struct_layout.push_back(1);
408 }
409 for (auto j = 0u; j < member.element_padding / sizeof(float);
410 j++) {
411 struct_layout.push_back(0);
412 }
413 }
414 } else {
415 size_t member_float_count = member.byte_length / sizeof(float);
416 float_count += member_float_count;
417 while (member_float_count > 0) {
418 struct_layout.push_back(1);
419 member_float_count--;
420 }
421 }
422 break;
423 }
425 VALIDATION_LOG << "Non-floating-type struct member " << member.name
426 << " is not supported.";
427 return nullptr;
428 }
429 }
430 data->uniforms.emplace_back(UniformDescription{
431 .name = ubo.name,
432 .location = binding,
433 .binding = binding,
434 .type = spirv_cross::SPIRType::Struct,
435 .struct_layout = std::move(struct_layout),
436 .struct_float_count = float_count,
437 });
438 }
439
440 // We only need to worry about storing vertex attributes.
441 if (entrypoints.front().execution_model == spv::ExecutionModelVertex) {
442 const auto inputs = compiler_->get_shader_resources().stage_inputs;
443 auto input_offsets = ComputeOffsets(inputs);
444 for (const auto& input : inputs) {
445 std::optional<size_t> offset = GetOffset(input.id, input_offsets);
446
447 const auto type = compiler_->get_type(input.type_id);
448
449 InputDescription input_description;
450 input_description.name = input.name;
451 input_description.location = compiler_->get_decoration(
452 input.id, spv::Decoration::DecorationLocation);
453 input_description.set = compiler_->get_decoration(
454 input.id, spv::Decoration::DecorationDescriptorSet);
455 input_description.binding = compiler_->get_decoration(
456 input.id, spv::Decoration::DecorationBinding);
457 input_description.type = type.basetype;
458 input_description.bit_width = type.width;
459 input_description.vec_size = type.vecsize;
460 input_description.columns = type.columns;
461 input_description.offset = offset.value_or(0u);
462 data->inputs.emplace_back(std::move(input_description));
463 }
464 }
465
466 return data;
467}
468
469std::shared_ptr<ShaderBundleData> Reflector::GenerateShaderBundleData() const {
470 const auto& entrypoints = compiler_->get_entry_points_and_stages();
471 if (entrypoints.size() != 1u) {
472 VALIDATION_LOG << "Single entrypoint not found.";
473 return nullptr;
474 }
475 auto data = std::make_shared<ShaderBundleData>(
476 options_.entry_point_name, //
477 entrypoints.front().execution_model, //
478 options_.target_platform //
479 );
480 data->SetShaderData(shader_data_);
481
482 const auto uniforms = compiler_->get_shader_resources().uniform_buffers;
483 for (const auto& uniform : uniforms) {
484 ShaderBundleData::ShaderUniformStruct uniform_struct;
485 uniform_struct.name = uniform.name;
486 uniform_struct.ext_res_0 = compiler_.GetExtendedMSLResourceBinding(
488 uniform_struct.set = compiler_->get_decoration(
489 uniform.id, spv::Decoration::DecorationDescriptorSet);
490 uniform_struct.binding = compiler_->get_decoration(
491 uniform.id, spv::Decoration::DecorationBinding);
492
493 const auto type = compiler_->get_type(uniform.type_id);
494 if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
495 std::cerr << "Error: Uniform \"" << uniform.name
496 << "\" is not a struct. All Flutter GPU shader uniforms must "
497 "be structs."
498 << std::endl;
499 return nullptr;
500 }
501
502 size_t size_in_bytes = 0;
503 for (const auto& struct_member : ReadStructMembers(uniform.type_id)) {
504 size_in_bytes += struct_member.byte_length;
505 if (StringStartsWith(struct_member.name, "_PADDING_")) {
506 continue;
507 }
508 ShaderBundleData::ShaderUniformStructField uniform_struct_field;
509 uniform_struct_field.name = struct_member.name;
510 uniform_struct_field.type = struct_member.base_type;
511 uniform_struct_field.offset_in_bytes = struct_member.offset;
512 uniform_struct_field.element_size_in_bytes = struct_member.size;
513 uniform_struct_field.total_size_in_bytes = struct_member.byte_length;
514 uniform_struct_field.array_elements = struct_member.array_elements;
515 uniform_struct.fields.push_back(uniform_struct_field);
516 }
517 uniform_struct.size_in_bytes = size_in_bytes;
518
519 data->AddUniformStruct(uniform_struct);
520 }
521
522 const auto sampled_images = compiler_->get_shader_resources().sampled_images;
523 for (const auto& image : sampled_images) {
524 ShaderBundleData::ShaderUniformTexture uniform_texture;
525 uniform_texture.name = image.name;
526 uniform_texture.ext_res_0 = compiler_.GetExtendedMSLResourceBinding(
528 uniform_texture.set = compiler_->get_decoration(
529 image.id, spv::Decoration::DecorationDescriptorSet);
530 uniform_texture.binding =
531 compiler_->get_decoration(image.id, spv::Decoration::DecorationBinding);
532 data->AddUniformTexture(uniform_texture);
533 }
534
535 // We only need to worry about storing vertex attributes.
536 if (entrypoints.front().execution_model == spv::ExecutionModelVertex) {
537 const auto inputs = compiler_->get_shader_resources().stage_inputs;
538 auto input_offsets = ComputeOffsets(inputs);
539 for (const auto& input : inputs) {
540 std::optional<size_t> offset = GetOffset(input.id, input_offsets);
541
542 const auto type = compiler_->get_type(input.type_id);
543
544 InputDescription input_description;
545 input_description.name = input.name;
546 input_description.location = compiler_->get_decoration(
547 input.id, spv::Decoration::DecorationLocation);
548 input_description.set = compiler_->get_decoration(
549 input.id, spv::Decoration::DecorationDescriptorSet);
550 input_description.binding = compiler_->get_decoration(
551 input.id, spv::Decoration::DecorationBinding);
552 input_description.type = type.basetype;
553 input_description.bit_width = type.width;
554 input_description.vec_size = type.vecsize;
555 input_description.columns = type.columns;
556 input_description.offset = offset.value_or(0u);
557 data->AddInputDescription(std::move(input_description));
558 }
559 }
560
561 return data;
562}
563
// Returns the element count of an array type, or std::nullopt for non-array
// types. Only one-dimensional arrays with literal sizes are supported; any
// other shape aborts via FML_CHECK.
std::optional<uint32_t> Reflector::GetArrayElements(
    const spirv_cross::SPIRType& type) const {
  if (type.array.empty()) {
    return std::nullopt;
  }
  FML_CHECK(type.array.size() == 1)
      << "Multi-dimensional arrays are not supported.";
  FML_CHECK(type.array_size_literal.front())
      << "Must use a literal for array sizes.";
  return type.array.front();
}
575
577 switch (type) {
579 return "Metal Shading Language";
581 return "OpenGL Shading Language";
583 return "OpenGL Shading Language (Relaxed Vulkan Semantics)";
585 return "SkSL Shading Language";
586 }
588}
589
590std::shared_ptr<fml::Mapping> Reflector::InflateTemplate(
591 std::string_view tmpl) const {
592 inja::Environment env;
593 env.set_trim_blocks(true);
594 env.set_lstrip_blocks(true);
595
596 env.add_callback("camel_case", 1u, [](inja::Arguments& args) {
597 return ToCamelCase(args.at(0u)->get<std::string>());
598 });
599
600 env.add_callback("to_shader_stage", 1u, [](inja::Arguments& args) {
601 return StringToShaderStage(args.at(0u)->get<std::string>());
602 });
603
604 env.add_callback("get_generator_name", 0u,
605 [type = compiler_.GetType()](inja::Arguments& args) {
606 return ToString(type);
607 });
608
609 auto inflated_template =
610 std::make_shared<std::string>(env.render(tmpl, *template_arguments_));
611
612 return std::make_shared<fml::NonOwnedMapping>(
613 reinterpret_cast<const uint8_t*>(inflated_template->data()),
614 inflated_template->size(), [inflated_template](auto, auto) {});
615}
616
617std::vector<size_t> Reflector::ComputeOffsets(
618 const spirv_cross::SmallVector<spirv_cross::Resource>& resources) const {
619 std::vector<size_t> offsets(resources.size(), 0);
620 if (resources.size() == 0) {
621 return offsets;
622 }
623 for (const auto& resource : resources) {
624 const auto type = compiler_->get_type(resource.type_id);
625 auto location = compiler_->get_decoration(
626 resource.id, spv::Decoration::DecorationLocation);
627 // Malformed shader, will be caught later on.
628 if (location >= resources.size() || location < 0) {
629 location = 0;
630 }
631 offsets[location] = (type.width * type.vecsize) / 8;
632 }
633 for (size_t i = 1; i < resources.size(); i++) {
634 offsets[i] += offsets[i - 1];
635 }
636 for (size_t i = resources.size() - 1; i > 0; i--) {
637 offsets[i] = offsets[i - 1];
638 }
639 offsets[0] = 0;
640
641 return offsets;
642}
643
644std::optional<size_t> Reflector::GetOffset(
645 spirv_cross::ID id,
646 const std::vector<size_t>& offsets) const {
647 uint32_t location =
648 compiler_->get_decoration(id, spv::Decoration::DecorationLocation);
649 if (location >= offsets.size()) {
650 return std::nullopt;
651 }
652 return offsets[location];
653}
654
655std::optional<nlohmann::json::object_t> Reflector::ReflectResource(
656 const spirv_cross::Resource& resource,
657 std::optional<size_t> offset) const {
658 nlohmann::json::object_t result;
659
660 result["name"] = resource.name;
661 result["descriptor_set"] = compiler_->get_decoration(
662 resource.id, spv::Decoration::DecorationDescriptorSet);
663 result["binding"] = compiler_->get_decoration(
664 resource.id, spv::Decoration::DecorationBinding);
665 result["set"] = compiler_->get_decoration(
666 resource.id, spv::Decoration::DecorationDescriptorSet);
667 result["location"] = compiler_->get_decoration(
668 resource.id, spv::Decoration::DecorationLocation);
669 result["index"] =
670 compiler_->get_decoration(resource.id, spv::Decoration::DecorationIndex);
671 result["ext_res_0"] = compiler_.GetExtendedMSLResourceBinding(
673 result["ext_res_1"] = compiler_.GetExtendedMSLResourceBinding(
675 result["relaxed_precision"] =
676 compiler_->get_decoration(
677 resource.id, spv::Decoration::DecorationRelaxedPrecision) == 1;
678 result["offset"] = offset.value_or(0u);
679 auto type = ReflectType(resource.type_id);
680 if (!type.has_value()) {
681 return std::nullopt;
682 }
683 result["type"] = std::move(type.value());
684 return result;
685}
686
687std::optional<nlohmann::json::object_t> Reflector::ReflectType(
688 const spirv_cross::TypeID& type_id) const {
689 nlohmann::json::object_t result;
690
691 const auto type = compiler_->get_type(type_id);
692
693 result["type_name"] = StructMember::BaseTypeToString(type.basetype);
694 result["bit_width"] = type.width;
695 result["vec_size"] = type.vecsize;
696 result["columns"] = type.columns;
697 auto& members = result["members"] = nlohmann::json::array_t{};
698 if (type.basetype == spirv_cross::SPIRType::BaseType::Struct) {
699 for (const auto& struct_member : ReadStructMembers(type_id)) {
700 auto member = nlohmann::json::object_t{};
701 member["name"] = struct_member.name;
702 member["type"] = struct_member.type;
703 member["base_type"] =
704 StructMember::BaseTypeToString(struct_member.base_type);
705 member["offset"] = struct_member.offset;
706 member["size"] = struct_member.size;
707 member["byte_length"] = struct_member.byte_length;
708 if (struct_member.array_elements.has_value()) {
709 member["array_elements"] = struct_member.array_elements.value();
710 } else {
711 member["array_elements"] = "std::nullopt";
712 }
713 members.emplace_back(std::move(member));
714 }
715 }
716
717 return result;
718}
719
720std::optional<nlohmann::json::array_t> Reflector::ReflectResources(
721 const spirv_cross::SmallVector<spirv_cross::Resource>& resources,
722 bool compute_offsets) const {
723 nlohmann::json::array_t result;
724 result.reserve(resources.size());
725 std::vector<size_t> offsets;
726 if (compute_offsets) {
727 offsets = ComputeOffsets(resources);
728 }
729 for (const auto& resource : resources) {
730 std::optional<size_t> maybe_offset = std::nullopt;
731 if (compute_offsets) {
732 maybe_offset = GetOffset(resource.id, offsets);
733 }
734 if (auto reflected = ReflectResource(resource, maybe_offset);
735 reflected.has_value()) {
736 result.emplace_back(std::move(reflected.value()));
737 } else {
738 return std::nullopt;
739 }
740 }
741 return result;
742}
743
// Returns the C++ spelling of a padding type ("Padding<N>") of the given
// byte size, used for synthesized filler members in reflected structs.
// Simplified: plain string concatenation replaces the heavier ostringstream.
static std::string TypeNameWithPaddingOfSize(size_t size) {
  return "Padding<" + std::to_string(size) + ">";
}
749
// Host-side mapping for a SPIR-V scalar: the C++ type name to emit in
// generated code and that type's size in bytes.
struct KnownType {
  std::string name;
  size_t byte_size = 0;
};
754
755static std::optional<KnownType> ReadKnownScalarType(
756 spirv_cross::SPIRType::BaseType type) {
757 switch (type) {
758 case spirv_cross::SPIRType::BaseType::Boolean:
759 return KnownType{
760 .name = "bool",
761 .byte_size = sizeof(bool),
762 };
763 case spirv_cross::SPIRType::BaseType::Float:
764 return KnownType{
765 .name = "Scalar",
766 .byte_size = sizeof(Scalar),
767 };
768 case spirv_cross::SPIRType::BaseType::Half:
769 return KnownType{
770 .name = "Half",
771 .byte_size = sizeof(Half),
772 };
773 case spirv_cross::SPIRType::BaseType::UInt:
774 return KnownType{
775 .name = "uint32_t",
776 .byte_size = sizeof(uint32_t),
777 };
778 case spirv_cross::SPIRType::BaseType::Int:
779 return KnownType{
780 .name = "int32_t",
781 .byte_size = sizeof(int32_t),
782 };
783 default:
784 break;
785 }
786 return std::nullopt;
787}
788
789//------------------------------------------------------------------------------
790/// @brief Get the reflected struct size. In the vast majority of the
791/// cases, this is the same as the declared struct size as given by
792/// the compiler. But, additional padding may need to be introduced
793/// after the end of the struct to keep in line with the alignment
794/// requirement of the individual struct members. This method
795/// figures out the actual size of the reflected struct that can be
796/// referenced in native code.
797///
798/// @param[in] members The members
799///
800/// @return The reflected structure size.
801///
802static size_t GetReflectedStructSize(const std::vector<StructMember>& members) {
803 auto struct_size = 0u;
804 for (const auto& member : members) {
805 struct_size += member.byte_length;
806 }
807 return struct_size;
808}
809
810std::vector<StructMember> Reflector::ReadStructMembers(
811 const spirv_cross::TypeID& type_id) const {
812 const auto& struct_type = compiler_->get_type(type_id);
813 FML_CHECK(struct_type.basetype == spirv_cross::SPIRType::BaseType::Struct);
814
815 std::vector<StructMember> result;
816
817 size_t current_byte_offset = 0;
818 size_t max_member_alignment = 0;
819
820 for (size_t i = 0; i < struct_type.member_types.size(); i++) {
821 const auto& member = compiler_->get_type(struct_type.member_types[i]);
822 const auto struct_member_offset =
823 compiler_->type_struct_member_offset(struct_type, i);
824 auto array_elements = GetArrayElements(member);
825
826 if (struct_member_offset > current_byte_offset) {
827 const auto alignment_pad = struct_member_offset - current_byte_offset;
828 result.emplace_back(StructMember{
829 TypeNameWithPaddingOfSize(alignment_pad), // type
830 spirv_cross::SPIRType::BaseType::Void, // basetype
831 std::format("_PADDING_{}_",
832 GetMemberNameAtIndex(struct_type, i)), // name
833 current_byte_offset, // offset
834 alignment_pad, // size
835 alignment_pad, // byte_length
836 std::nullopt, // array_elements
837 0, // element_padding
838 });
839 current_byte_offset += alignment_pad;
840 }
841
842 max_member_alignment =
843 std::max<size_t>(max_member_alignment,
844 (member.width / 8) * member.columns * member.vecsize);
845
846 FML_CHECK(current_byte_offset == struct_member_offset);
847
848 // A user defined struct.
849 if (member.basetype == spirv_cross::SPIRType::BaseType::Struct) {
850 const size_t size =
851 GetReflectedStructSize(ReadStructMembers(member.self));
852 uint32_t stride = GetArrayStride<0>(struct_type, member, i);
853 if (stride == 0) {
854 stride = size;
855 }
856 uint32_t element_padding = stride - size;
857 result.emplace_back(StructMember{
858 compiler_->get_name(member.self), // type
859 member.basetype, // basetype
860 GetMemberNameAtIndex(struct_type, i), // name
861 struct_member_offset, // offset
862 size, // size
863 stride * array_elements.value_or(1), // byte_length
864 array_elements, // array_elements
865 element_padding, // element_padding
866 });
867 current_byte_offset += stride * array_elements.value_or(1);
868 continue;
869 }
870
871 // Tightly packed 4x4 Matrix is special cased as we know how to work with
872 // those.
873 if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
874 member.width == sizeof(Scalar) * 8 && //
875 member.columns == 4 && //
876 member.vecsize == 4 //
877 ) {
878 uint32_t stride = GetArrayStride<sizeof(Matrix)>(struct_type, member, i);
879 uint32_t element_padding = stride - sizeof(Matrix);
880 result.emplace_back(StructMember{
881 "Matrix", // type
882 member.basetype, // basetype
883 GetMemberNameAtIndex(struct_type, i), // name
884 struct_member_offset, // offset
885 sizeof(Matrix), // size
886 stride * array_elements.value_or(1), // byte_length
887 array_elements, // array_elements
888 element_padding, // element_padding
889 });
890 current_byte_offset += stride * array_elements.value_or(1);
891 continue;
892 }
893
894 // Tightly packed UintPoint32 (uvec2)
895 if (member.basetype == spirv_cross::SPIRType::BaseType::UInt && //
896 member.width == sizeof(uint32_t) * 8 && //
897 member.columns == 1 && //
898 member.vecsize == 2 //
899 ) {
900 uint32_t stride =
901 GetArrayStride<sizeof(UintPoint32)>(struct_type, member, i);
902 uint32_t element_padding = stride - sizeof(UintPoint32);
903 result.emplace_back(StructMember{
904 "UintPoint32", // type
905 member.basetype, // basetype
906 GetMemberNameAtIndex(struct_type, i), // name
907 struct_member_offset, // offset
908 sizeof(UintPoint32), // size
909 stride * array_elements.value_or(1), // byte_length
910 array_elements, // array_elements
911 element_padding, // element_padding
912 });
913 current_byte_offset += stride * array_elements.value_or(1);
914 continue;
915 }
916
917 // Tightly packed UintPoint32 (ivec2)
918 if (member.basetype == spirv_cross::SPIRType::BaseType::Int && //
919 member.width == sizeof(int32_t) * 8 && //
920 member.columns == 1 && //
921 member.vecsize == 2 //
922 ) {
923 uint32_t stride =
924 GetArrayStride<sizeof(IPoint32)>(struct_type, member, i);
925 uint32_t element_padding = stride - sizeof(IPoint32);
926 result.emplace_back(StructMember{
927 "IPoint32", // type
928 member.basetype, // basetype
929 GetMemberNameAtIndex(struct_type, i), // name
930 struct_member_offset, // offset
931 sizeof(IPoint32), // size
932 stride * array_elements.value_or(1), // byte_length
933 array_elements, // array_elements
934 element_padding, // element_padding
935 });
936 current_byte_offset += stride * array_elements.value_or(1);
937 continue;
938 }
939
940 // Tightly packed Point (vec2).
941 if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
942 member.width == sizeof(float) * 8 && //
943 member.columns == 1 && //
944 member.vecsize == 2 //
945 ) {
946 uint32_t stride = GetArrayStride<sizeof(Point)>(struct_type, member, i);
947 uint32_t element_padding = stride - sizeof(Point);
948 result.emplace_back(StructMember{
949 "Point", // type
950 member.basetype, // basetype
951 GetMemberNameAtIndex(struct_type, i), // name
952 struct_member_offset, // offset
953 sizeof(Point), // size
954 stride * array_elements.value_or(1), // byte_length
955 array_elements, // array_elements
956 element_padding, // element_padding
957 });
958 current_byte_offset += stride * array_elements.value_or(1);
959 continue;
960 }
961
962 // Tightly packed Vector3.
963 if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
964 member.width == sizeof(float) * 8 && //
965 member.columns == 1 && //
966 member.vecsize == 3 //
967 ) {
968 uint32_t stride = GetArrayStride<sizeof(Vector3)>(struct_type, member, i);
969 uint32_t element_padding = stride - sizeof(Vector3);
970 result.emplace_back(StructMember{
971 "Vector3", // type
972 member.basetype, // basetype
973 GetMemberNameAtIndex(struct_type, i), // name
974 struct_member_offset, // offset
975 sizeof(Vector3), // size
976 stride * array_elements.value_or(1), // byte_length
977 array_elements, // array_elements
978 element_padding, // element_padding
979 });
980 current_byte_offset += stride * array_elements.value_or(1);
981 continue;
982 }
983
984 // Tightly packed Vector4.
985 if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
986 member.width == sizeof(float) * 8 && //
987 member.columns == 1 && //
988 member.vecsize == 4 //
989 ) {
990 uint32_t stride = GetArrayStride<sizeof(Vector4)>(struct_type, member, i);
991 uint32_t element_padding = stride - sizeof(Vector4);
992 result.emplace_back(StructMember{
993 "Vector4", // type
994 member.basetype, // basetype
995 GetMemberNameAtIndex(struct_type, i), // name
996 struct_member_offset, // offset
997 sizeof(Vector4), // size
998 stride * array_elements.value_or(1), // byte_length
999 array_elements, // array_elements
1000 element_padding, // element_padding
1001 });
1002 current_byte_offset += stride * array_elements.value_or(1);
1003 continue;
1004 }
1005
1006 // Tightly packed half Point (vec2).
1007 if (member.basetype == spirv_cross::SPIRType::BaseType::Half && //
1008 member.width == sizeof(Half) * 8 && //
1009 member.columns == 1 && //
1010 member.vecsize == 2 //
1011 ) {
1012 uint32_t stride =
1013 GetArrayStride<sizeof(HalfVector2)>(struct_type, member, i);
1014 uint32_t element_padding = stride - sizeof(HalfVector2);
1015 result.emplace_back(StructMember{
1016 "HalfVector2", // type
1017 member.basetype, // basetype
1018 GetMemberNameAtIndex(struct_type, i), // name
1019 struct_member_offset, // offset
1020 sizeof(HalfVector2), // size
1021 stride * array_elements.value_or(1), // byte_length
1022 array_elements, // array_elements
1023 element_padding, // element_padding
1024 });
1025 current_byte_offset += stride * array_elements.value_or(1);
1026 continue;
1027 }
1028
1029 // Tightly packed Half Float Vector3.
1030 if (member.basetype == spirv_cross::SPIRType::BaseType::Half && //
1031 member.width == sizeof(Half) * 8 && //
1032 member.columns == 1 && //
1033 member.vecsize == 3 //
1034 ) {
1035 uint32_t stride =
1036 GetArrayStride<sizeof(HalfVector3)>(struct_type, member, i);
1037 uint32_t element_padding = stride - sizeof(HalfVector3);
1038 result.emplace_back(StructMember{
1039 "HalfVector3", // type
1040 member.basetype, // basetype
1041 GetMemberNameAtIndex(struct_type, i), // name
1042 struct_member_offset, // offset
1043 sizeof(HalfVector3), // size
1044 stride * array_elements.value_or(1), // byte_length
1045 array_elements, // array_elements
1046 element_padding, // element_padding
1047 });
1048 current_byte_offset += stride * array_elements.value_or(1);
1049 continue;
1050 }
1051
1052 // Tightly packed Half Float Vector4.
1053 if (member.basetype == spirv_cross::SPIRType::BaseType::Half && //
1054 member.width == sizeof(Half) * 8 && //
1055 member.columns == 1 && //
1056 member.vecsize == 4 //
1057 ) {
1058 uint32_t stride =
1059 GetArrayStride<sizeof(HalfVector4)>(struct_type, member, i);
1060 uint32_t element_padding = stride - sizeof(HalfVector4);
1061 result.emplace_back(StructMember{
1062 "HalfVector4", // type
1063 member.basetype, // basetype
1064 GetMemberNameAtIndex(struct_type, i), // name
1065 struct_member_offset, // offset
1066 sizeof(HalfVector4), // size
1067 stride * array_elements.value_or(1), // byte_length
1068 array_elements, // array_elements
1069 element_padding, // element_padding
1070 });
1071 current_byte_offset += stride * array_elements.value_or(1);
1072 continue;
1073 }
1074
1075 // Other isolated scalars (like bool, int, float/Scalar, etc..).
1076 {
1077 auto maybe_known_type = ReadKnownScalarType(member.basetype);
1078 if (maybe_known_type.has_value() && //
1079 member.columns == 1 && //
1080 member.vecsize == 1 //
1081 ) {
1082 uint32_t stride = GetArrayStride<0>(struct_type, member, i);
1083 if (stride == 0) {
1084 stride = maybe_known_type.value().byte_size;
1085 }
1086 uint32_t element_padding = stride - maybe_known_type.value().byte_size;
1087 // Add the type directly.
1088 result.emplace_back(StructMember{
1089 maybe_known_type.value().name, // type
1090 member.basetype, // basetype
1091 GetMemberNameAtIndex(struct_type, i), // name
1092 struct_member_offset, // offset
1093 maybe_known_type.value().byte_size, // size
1094 stride * array_elements.value_or(1), // byte_length
1095 array_elements, // array_elements
1096 element_padding, // element_padding
1097 });
1098 current_byte_offset += stride * array_elements.value_or(1);
1099 continue;
1100 }
1101 }
1102
1103 // Catch all for unknown types. Just add the necessary padding to the struct
1104 // and move on.
1105 {
1106 const size_t size = (member.width * member.columns * member.vecsize) / 8u;
1107 uint32_t stride = GetArrayStride<0>(struct_type, member, i);
1108 if (stride == 0) {
1109 stride = size;
1110 }
1111 auto element_padding = stride - size;
1112 result.emplace_back(StructMember{
1113 TypeNameWithPaddingOfSize(size), // type
1114 member.basetype, // basetype
1115 GetMemberNameAtIndex(struct_type, i), // name
1116 struct_member_offset, // offset
1117 size, // size
1118 stride * array_elements.value_or(1), // byte_length
1119 array_elements, // array_elements
1120 element_padding, // element_padding
1121 });
1122 current_byte_offset += stride * array_elements.value_or(1);
1123 continue;
1124 }
1125 }
1126
1127 if (max_member_alignment > 0u) {
1128 const auto struct_length = current_byte_offset;
1129 {
1130 const auto excess = struct_length % max_member_alignment;
1131 if (excess != 0) {
1132 const auto padding = max_member_alignment - excess;
1133 result.emplace_back(StructMember{
1135 spirv_cross::SPIRType::BaseType::Void, // basetype
1136 "_PADDING_", // name
1137 current_byte_offset, // offset
1138 padding, // size
1139 padding, // byte_length
1140 std::nullopt, // array_elements
1141 0, // element_padding
1142 });
1143 }
1144 }
1145 }
1146
1147 return result;
1148}
1149
1150std::optional<Reflector::StructDefinition> Reflector::ReflectStructDefinition(
1151 const spirv_cross::TypeID& type_id) const {
1152 const auto& type = compiler_->get_type(type_id);
1153 if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
1154 return std::nullopt;
1155 }
1156
1157 const auto struct_name = compiler_->get_name(type_id);
1158 if (struct_name.find("_RESERVED_IDENTIFIER_") != std::string::npos) {
1159 return std::nullopt;
1160 }
1161
1162 auto struct_members = ReadStructMembers(type_id);
1163 auto reflected_struct_size = GetReflectedStructSize(struct_members);
1164
1165 StructDefinition struc;
1166 struc.name = struct_name;
1167 struc.byte_length = reflected_struct_size;
1168 struc.members = std::move(struct_members);
1169 return struc;
1170}
1171
1172nlohmann::json::object_t Reflector::EmitStructDefinition(
1173 std::optional<Reflector::StructDefinition> struc) const {
1174 nlohmann::json::object_t result;
1175 result["name"] = struc->name;
1176 result["byte_length"] = struc->byte_length;
1177 auto& members = result["members"] = nlohmann::json::array_t{};
1178 for (const auto& struct_member : struc->members) {
1179 auto& member = members.emplace_back(nlohmann::json::object_t{});
1180 member["name"] = struct_member.name;
1181 member["type"] = struct_member.type;
1182 member["base_type"] =
1183 StructMember::BaseTypeToString(struct_member.base_type);
1184 member["offset"] = struct_member.offset;
1185 member["byte_length"] = struct_member.byte_length;
1186 if (struct_member.array_elements.has_value()) {
1187 member["array_elements"] = struct_member.array_elements.value();
1188 } else {
1189 member["array_elements"] = "std::nullopt";
1190 }
1191 member["element_padding"] = struct_member.element_padding;
1192 }
1193 return result;
1194}
1195
1197 std::string type_name;
1198 spirv_cross::SPIRType::BaseType base_type;
1199 std::string variable_name;
1200 size_t byte_length = 0u;
1201};
1202
1204 const spirv_cross::Compiler& compiler,
1205 const spirv_cross::Resource* resource) {
1206 VertexType result;
1207 result.variable_name = resource->name;
1208 const auto& type = compiler.get_type(resource->type_id);
1209 result.base_type = type.basetype;
1210 const auto total_size = type.columns * type.vecsize * type.width / 8u;
1211 result.byte_length = total_size;
1212
1213 if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
1214 type.columns == 1u && type.vecsize == 2u &&
1215 type.width == sizeof(float) * 8u) {
1216 result.type_name = "Point";
1217 } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
1218 type.columns == 1u && type.vecsize == 4u &&
1219 type.width == sizeof(float) * 8u) {
1220 result.type_name = "Vector4";
1221 } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
1222 type.columns == 1u && type.vecsize == 3u &&
1223 type.width == sizeof(float) * 8u) {
1224 result.type_name = "Vector3";
1225 } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
1226 type.columns == 1u && type.vecsize == 1u &&
1227 type.width == sizeof(float) * 8u) {
1228 result.type_name = "Scalar";
1229 } else if (type.basetype == spirv_cross::SPIRType::BaseType::Int &&
1230 type.columns == 1u && type.vecsize == 1u &&
1231 type.width == sizeof(int32_t) * 8u) {
1232 result.type_name = "int32_t";
1233 } else {
1234 // Catch all unknown padding.
1235 result.type_name = TypeNameWithPaddingOfSize(total_size);
1236 }
1237
1238 return result;
1239}
1240
1241std::optional<Reflector::StructDefinition>
1242Reflector::ReflectPerVertexStructDefinition(
1243 const spirv_cross::SmallVector<spirv_cross::Resource>& stage_inputs) const {
1244 // Avoid emitting a zero sized structure. The code gen templates assume a
1245 // non-zero size.
1246 if (stage_inputs.empty()) {
1247 return std::nullopt;
1248 }
1249
1250 // Validate locations are contiguous and there are no duplicates.
1251 std::set<uint32_t> locations;
1252 for (const auto& input : stage_inputs) {
1253 auto location = compiler_->get_decoration(
1254 input.id, spv::Decoration::DecorationLocation);
1255 if (locations.count(location) != 0) {
1256 // Duplicate location. Bail.
1257 return std::nullopt;
1258 }
1259 locations.insert(location);
1260 }
1261
1262 for (size_t i = 0; i < locations.size(); i++) {
1263 if (locations.count(i) != 1) {
1264 // Locations are not contiguous. This usually happens when a single stage
1265 // input takes multiple input slots. No reflection information can be
1266 // generated for such cases anyway. So bail! It is up to the shader author
1267 // to make sure one stage input maps to a single input slot.
1268 return std::nullopt;
1269 }
1270 }
1271
1272 auto input_for_location =
1273 [&](uint32_t queried_location) -> const spirv_cross::Resource* {
1274 for (const auto& input : stage_inputs) {
1275 auto location = compiler_->get_decoration(
1276 input.id, spv::Decoration::DecorationLocation);
1277 if (location == queried_location) {
1278 return &input;
1279 }
1280 }
1281 // This really cannot happen with all the validation above.
1283 return nullptr;
1284 };
1285
1286 StructDefinition struc;
1287 struc.name = "PerVertexData";
1288 struc.byte_length = 0u;
1289 for (size_t i = 0; i < locations.size(); i++) {
1290 auto resource = input_for_location(i);
1291 if (resource == nullptr) {
1292 return std::nullopt;
1293 }
1294 const auto vertex_type =
1295 VertexTypeFromInputResource(*compiler_.GetCompiler(), resource);
1296
1297 auto member = StructMember{
1298 vertex_type.type_name, // type
1299 vertex_type.base_type, // base type
1300 vertex_type.variable_name, // name
1301 struc.byte_length, // offset
1302 vertex_type.byte_length, // size
1303 vertex_type.byte_length, // byte_length
1304 std::nullopt, // array_elements
1305 0, // element_padding
1306 };
1307 struc.byte_length += vertex_type.byte_length;
1308 struc.members.emplace_back(std::move(member));
1309 }
1310 return struc;
1311}
1312
1313std::optional<std::string> Reflector::GetMemberNameAtIndexIfExists(
1314 const spirv_cross::SPIRType& parent_type,
1315 size_t index) const {
1316 if (parent_type.type_alias != 0) {
1317 return GetMemberNameAtIndexIfExists(
1318 compiler_->get_type(parent_type.type_alias), index);
1319 }
1320
1321 if (auto found = ir_->meta.find(parent_type.self); found != ir_->meta.end()) {
1322 const auto& members = found->second.members;
1323 if (index < members.size() && !members[index].alias.empty()) {
1324 return members[index].alias;
1325 }
1326 }
1327 return std::nullopt;
1328}
1329
1330std::string Reflector::GetMemberNameAtIndex(
1331 const spirv_cross::SPIRType& parent_type,
1332 size_t index,
1333 std::string suffix) const {
1334 if (auto name = GetMemberNameAtIndexIfExists(parent_type, index);
1335 name.has_value()) {
1336 return name.value();
1337 }
1338 static std::atomic_size_t sUnnamedMembersID;
1339 std::stringstream stream;
1340 stream << "unnamed_" << sUnnamedMembersID++ << suffix;
1341 return stream.str();
1342}
1343
1344std::vector<Reflector::BindPrototype> Reflector::ReflectBindPrototypes(
1345 const spirv_cross::ShaderResources& resources,
1346 spv::ExecutionModel execution_model) const {
1347 std::vector<BindPrototype> prototypes;
1348 for (const auto& uniform_buffer : resources.uniform_buffers) {
1349 auto& proto = prototypes.emplace_back(BindPrototype{});
1350 proto.return_type = "bool";
1351 proto.name = ToCamelCase(uniform_buffer.name);
1352 proto.descriptor_type = "DescriptorType::kUniformBuffer";
1353 {
1354 std::stringstream stream;
1355 stream << "Bind uniform buffer for resource named " << uniform_buffer.name
1356 << ".";
1357 proto.docstring = stream.str();
1358 }
1359 proto.args.push_back(BindPrototypeArgument{
1360 .type_name = "ResourceBinder&",
1361 .argument_name = "command",
1362 });
1363 proto.args.push_back(BindPrototypeArgument{
1364 .type_name = "BufferView",
1365 .argument_name = "view",
1366 });
1367 }
1368 for (const auto& storage_buffer : resources.storage_buffers) {
1369 auto& proto = prototypes.emplace_back(BindPrototype{});
1370 proto.return_type = "bool";
1371 proto.name = ToCamelCase(storage_buffer.name);
1372 proto.descriptor_type = "DescriptorType::kStorageBuffer";
1373 {
1374 std::stringstream stream;
1375 stream << "Bind storage buffer for resource named " << storage_buffer.name
1376 << ".";
1377 proto.docstring = stream.str();
1378 }
1379 proto.args.push_back(BindPrototypeArgument{
1380 .type_name = "ResourceBinder&",
1381 .argument_name = "command",
1382 });
1383 proto.args.push_back(BindPrototypeArgument{
1384 .type_name = "BufferView",
1385 .argument_name = "view",
1386 });
1387 }
1388 for (const auto& sampled_image : resources.sampled_images) {
1389 auto& proto = prototypes.emplace_back(BindPrototype{});
1390 proto.return_type = "bool";
1391 proto.name = ToCamelCase(sampled_image.name);
1392 proto.descriptor_type = "DescriptorType::kSampledImage";
1393 {
1394 std::stringstream stream;
1395 stream << "Bind combined image sampler for resource named "
1396 << sampled_image.name << ".";
1397 proto.docstring = stream.str();
1398 }
1399 proto.args.push_back(BindPrototypeArgument{
1400 .type_name = "ResourceBinder&",
1401 .argument_name = "command",
1402 });
1403 proto.args.push_back(BindPrototypeArgument{
1404 .type_name = "std::shared_ptr<const Texture>",
1405 .argument_name = "texture",
1406 });
1407 proto.args.push_back(BindPrototypeArgument{
1408 .type_name = "raw_ptr<const Sampler>",
1409 .argument_name = "sampler",
1410 });
1411 }
1412 for (const auto& separate_image : resources.separate_images) {
1413 auto& proto = prototypes.emplace_back(BindPrototype{});
1414 proto.return_type = "bool";
1415 proto.name = ToCamelCase(separate_image.name);
1416 proto.descriptor_type = "DescriptorType::kImage";
1417 {
1418 std::stringstream stream;
1419 stream << "Bind separate image for resource named " << separate_image.name
1420 << ".";
1421 proto.docstring = stream.str();
1422 }
1423 proto.args.push_back(BindPrototypeArgument{
1424 .type_name = "Command&",
1425 .argument_name = "command",
1426 });
1427 proto.args.push_back(BindPrototypeArgument{
1428 .type_name = "std::shared_ptr<const Texture>",
1429 .argument_name = "texture",
1430 });
1431 }
1432 for (const auto& separate_sampler : resources.separate_samplers) {
1433 auto& proto = prototypes.emplace_back(BindPrototype{});
1434 proto.return_type = "bool";
1435 proto.name = ToCamelCase(separate_sampler.name);
1436 proto.descriptor_type = "DescriptorType::kSampler";
1437 {
1438 std::stringstream stream;
1439 stream << "Bind separate sampler for resource named "
1440 << separate_sampler.name << ".";
1441 proto.docstring = stream.str();
1442 }
1443 proto.args.push_back(BindPrototypeArgument{
1444 .type_name = "Command&",
1445 .argument_name = "command",
1446 });
1447 proto.args.push_back(BindPrototypeArgument{
1448 .type_name = "std::shared_ptr<const Sampler>",
1449 .argument_name = "sampler",
1450 });
1451 }
1452 return prototypes;
1453}
1454
1455nlohmann::json::array_t Reflector::EmitBindPrototypes(
1456 const spirv_cross::ShaderResources& resources,
1457 spv::ExecutionModel execution_model) const {
1458 const auto prototypes = ReflectBindPrototypes(resources, execution_model);
1459 nlohmann::json::array_t result;
1460 for (const auto& res : prototypes) {
1461 auto& item = result.emplace_back(nlohmann::json::object_t{});
1462 item["return_type"] = res.return_type;
1463 item["name"] = res.name;
1464 item["docstring"] = res.docstring;
1465 item["descriptor_type"] = res.descriptor_type;
1466 auto& args = item["args"] = nlohmann::json::array_t{};
1467 for (const auto& arg : res.args) {
1468 auto& json_arg = args.emplace_back(nlohmann::json::object_t{});
1469 json_arg["type_name"] = arg.type_name;
1470 json_arg["argument_name"] = arg.argument_name;
1471 }
1472 }
1473 return result;
1474}
1475
1476} // namespace compiler
1477} // namespace impeller
GLenum type
static const char * kVulkanUBOName
Reflector(Options options, const std::shared_ptr< const spirv_cross::ParsedIR > &ir, const std::shared_ptr< fml::Mapping > &shader_data, const CompilerBackend &compiler)
Definition reflector.cc:63
std::shared_ptr< fml::Mapping > GetReflectionJSON() const
Definition reflector.cc:109
std::shared_ptr< fml::Mapping > GetReflectionCC() const
Definition reflector.cc:126
std::shared_ptr< RuntimeStageData::Shader > GetRuntimeStageShaderData() const
Definition reflector.cc:130
std::shared_ptr< ShaderBundleData > GetShaderBundleData() const
Definition reflector.cc:135
std::shared_ptr< fml::Mapping > GetReflectionHeader() const
Definition reflector.cc:122
static int input(yyscan_t yyscanner)
int32_t value
FlutterVulkanImage * image
G_BEGIN_DECLS G_MODULE_EXPORT FlValue * args
#define FML_CHECK(condition)
Definition logging.h:104
#define FML_UNREACHABLE()
Definition logging.h:128
const char * name
Definition fuchsia.cc:49
Vector2 padding
The halo padding in source space.
std::array< MockImage, 3 > images
it will be possible to load the file into Perfetto s trace viewer use test Running tests that layout and measure text will not yield consistent results across various platforms Enabling this option will make font resolution default to the Ahem test font on all disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive keep the shell running after the Dart script has completed enable serial On low power devices with low core running concurrent GC tasks on threads can cause them to contend with the UI thread which could potentially lead to jank This option turns off all concurrent GC activities domain network JSON encoded network policy per domain This overrides the DisallowInsecureConnections switch Embedder can specify whether to allow or disallow insecure connections at a domain level old gen heap size
static std::string TypeNameWithPaddingOfSize(size_t size)
Definition reflector.cc:744
static VertexType VertexTypeFromInputResource(const spirv_cross::Compiler &compiler, const spirv_cross::Resource *resource)
static std::string ToString(CompilerBackend::Type type)
Definition reflector.cc:576
static std::optional< RuntimeStageBackend > GetRuntimeStageBackend(TargetPlatform target_platform)
Definition reflector.cc:302
static size_t GetReflectedStructSize(const std::vector< StructMember > &members)
Get the reflected struct size. In the vast majority of the cases, this is the same as the declared st...
Definition reflector.cc:802
static std::string ExecutionModelToString(spv::ExecutionModel model)
Definition reflector.cc:34
static std::string StringToShaderStage(const std::string &str)
Definition reflector.cc:47
constexpr std::string_view kReflectionHeaderTemplate
std::string ToCamelCase(std::string_view string)
Definition utilities.cc:39
constexpr std::string_view kReflectionCCTemplate
static std::optional< KnownType > ReadKnownScalarType(spirv_cross::SPIRType::BaseType type)
Definition reflector.cc:755
bool StringStartsWith(const std::string &target, const std::string &prefix)
Definition utilities.cc:87
std::vector< spirv_cross::ID > SortUniforms(const spirv_cross::ParsedIR *ir, const spirv_cross::Compiler *compiler, std::optional< spirv_cross::SPIRType::BaseType > type_filter, bool include)
Sorts uniform declarations in an IR according to decoration order.
float Scalar
Definition scalar.h:19
TPoint< Scalar > Point
Definition point.h:327
TPoint< int32_t > IPoint32
Definition point.h:329
TPoint< uint32_t > UintPoint32
Definition point.h:330
Definition ref_ptr.h:261
A storage only class for half precision floating point.
Definition half.h:41
spirv_cross::Compiler * GetCompiler()
uint32_t GetExtendedMSLResourceBinding(ExtendedResourceIndex index, spirv_cross::ID id) const
static std::string BaseTypeToString(spirv_cross::SPIRType::BaseType type)
Definition reflector.h:44
spirv_cross::SPIRType::BaseType base_type
std::shared_ptr< const fml::Mapping > data
#define VALIDATION_LOG
Definition validation.h:91