reflector.cc
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// FLUTTER_NOLINT: https://github.com/flutter/flutter/issues/105732

#include "impeller/compiler/reflector.h"

#include <atomic>
#include <optional>
#include <set>
#include <sstream>

#include "flutter/fml/logging.h"
#include "fml/backtrace.h"
#include "impeller/base/strings.h"
#include "impeller/base/validation.h"
#include "impeller/compiler/code_gen_template.h"
#include "impeller/compiler/shader_bundle_data.h"
#include "impeller/compiler/types.h"
#include "impeller/compiler/uniform_sorter.h"
#include "impeller/compiler/utilities.h"
#include "impeller/geometry/half.h"
#include "impeller/geometry/matrix.h"
#include "impeller/geometry/scalar.h"
#include "impeller/runtime_stage/runtime_stage.h"
#include "spirv_common.hpp"

namespace impeller {
namespace compiler {

static std::string ExecutionModelToString(spv::ExecutionModel model) {
  switch (model) {
    case spv::ExecutionModel::ExecutionModelVertex:
      return "vertex";
    case spv::ExecutionModel::ExecutionModelFragment:
      return "fragment";
    case spv::ExecutionModel::ExecutionModelGLCompute:
      return "compute";
    default:
      return "unsupported";
  }
}

static std::string StringToShaderStage(const std::string& str) {
  if (str == "vertex") {
    return "ShaderStage::kVertex";
  }

  if (str == "fragment") {
    return "ShaderStage::kFragment";
  }

  if (str == "compute") {
    return "ShaderStage::kCompute";
  }

  return "ShaderStage::kUnknown";
}

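// The constructor runs the entire reflection pipeline up front: template
// arguments first, then the reflection header and CC mappings, then the
// runtime stage and shader bundle data. is_valid_ is only set once every
// stage has succeeded, so a default-constructed result signals failure.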
Reflector::Reflector(Options options,
                     const std::shared_ptr<const spirv_cross::ParsedIR>& ir,
                     const std::shared_ptr<fml::Mapping>& shader_data,
                     const CompilerBackend& compiler)
    : options_(std::move(options)),
      ir_(ir),
      shader_data_(shader_data),
      compiler_(compiler) {
  if (!ir_ || !compiler_) {
    return;
  }

  if (auto template_arguments = GenerateTemplateArguments();
      template_arguments.has_value()) {
    template_arguments_ =
        std::make_unique<nlohmann::json>(std::move(template_arguments.value()));
  } else {
    return;
  }

  reflection_header_ = GenerateReflectionHeader();
  if (!reflection_header_) {
    return;
  }

  reflection_cc_ = GenerateReflectionCC();
  if (!reflection_cc_) {
    return;
  }

  runtime_stage_shader_ = GenerateRuntimeStageData();

  shader_bundle_data_ = GenerateShaderBundleData();
  if (!shader_bundle_data_) {
    return;
  }

  is_valid_ = true;
}
101
102Reflector::~Reflector() = default;
103
104bool Reflector::IsValid() const {
105 return is_valid_;
106}
107
108std::shared_ptr<fml::Mapping> Reflector::GetReflectionJSON() const {
109 if (!is_valid_) {
110 return nullptr;
111 }
112
113 auto json_string =
114 std::make_shared<std::string>(template_arguments_->dump(2u));
115
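  // The release callback captures json_string by value, keeping the backing
  // string alive for the lifetime of the NonOwnedMapping.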
  return std::make_shared<fml::NonOwnedMapping>(
      reinterpret_cast<const uint8_t*>(json_string->data()),
      json_string->size(), [json_string](auto, auto) {});
}

std::shared_ptr<fml::Mapping> Reflector::GetReflectionHeader() const {
  return reflection_header_;
}

std::shared_ptr<fml::Mapping> Reflector::GetReflectionCC() const {
  return reflection_cc_;
}

std::shared_ptr<RuntimeStageData::Shader> Reflector::GetRuntimeStageShaderData()
    const {
  return runtime_stage_shader_;
}

std::shared_ptr<ShaderBundleData> Reflector::GetShaderBundleData() const {
  return shader_bundle_data_;
}

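// Builds the JSON object that is handed to the inja templates when rendering
// the generated reflection header and source files.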
std::optional<nlohmann::json> Reflector::GenerateTemplateArguments() const {
  nlohmann::json root;

  const auto& entrypoints = compiler_->get_entry_points_and_stages();
  if (entrypoints.size() != 1) {
    VALIDATION_LOG << "Incorrect number of entrypoints in the shader. Found "
                   << entrypoints.size() << " but expected 1.";
    return std::nullopt;
  }

  auto execution_model = entrypoints.front().execution_model;
  {
    root["entrypoint"] = options_.entry_point_name;
    root["shader_name"] = options_.shader_name;
    root["shader_stage"] = ExecutionModelToString(execution_model);
    root["header_file_name"] = options_.header_file_name;
  }

  const auto shader_resources = compiler_->get_shader_resources();

  // Subpass Inputs.
  {
    auto& subpass_inputs = root["subpass_inputs"] = nlohmann::json::array_t{};
    if (auto subpass_inputs_json =
            ReflectResources(shader_resources.subpass_inputs);
        subpass_inputs_json.has_value()) {
      for (auto subpass_input : subpass_inputs_json.value()) {
        subpass_input["descriptor_type"] = "DescriptorType::kInputAttachment";
        subpass_inputs.emplace_back(std::move(subpass_input));
      }
    } else {
      return std::nullopt;
    }
  }

  // Uniform and storage buffers.
  {
    auto& buffers = root["buffers"] = nlohmann::json::array_t{};
    if (auto uniform_buffers_json =
            ReflectResources(shader_resources.uniform_buffers);
        uniform_buffers_json.has_value()) {
      for (auto uniform_buffer : uniform_buffers_json.value()) {
        uniform_buffer["descriptor_type"] = "DescriptorType::kUniformBuffer";
        buffers.emplace_back(std::move(uniform_buffer));
      }
    } else {
      return std::nullopt;
    }
    if (auto storage_buffers_json =
            ReflectResources(shader_resources.storage_buffers);
        storage_buffers_json.has_value()) {
      for (auto storage_buffer : storage_buffers_json.value()) {
        storage_buffer["descriptor_type"] = "DescriptorType::kStorageBuffer";
        buffers.emplace_back(std::move(storage_buffer));
      }
    } else {
      return std::nullopt;
    }
  }

  {
    auto& stage_inputs = root["stage_inputs"] = nlohmann::json::array_t{};
    if (auto stage_inputs_json = ReflectResources(
            shader_resources.stage_inputs,
            /*compute_offsets=*/execution_model == spv::ExecutionModelVertex);
        stage_inputs_json.has_value()) {
      stage_inputs = std::move(stage_inputs_json.value());
    } else {
      return std::nullopt;
    }
  }

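  // Combined image samplers, separate images, and separate samplers all land
  // in the "sampled_images" array, distinguished only by descriptor type.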
  {
    auto combined_sampled_images =
        ReflectResources(shader_resources.sampled_images);
    auto images = ReflectResources(shader_resources.separate_images);
    auto samplers = ReflectResources(shader_resources.separate_samplers);
    if (!combined_sampled_images.has_value() || !images.has_value() ||
        !samplers.has_value()) {
      return std::nullopt;
    }
    auto& sampled_images = root["sampled_images"] = nlohmann::json::array_t{};
    for (auto value : combined_sampled_images.value()) {
      value["descriptor_type"] = "DescriptorType::kSampledImage";
      sampled_images.emplace_back(std::move(value));
    }
    for (auto value : images.value()) {
      value["descriptor_type"] = "DescriptorType::kImage";
      sampled_images.emplace_back(std::move(value));
    }
    for (auto value : samplers.value()) {
      value["descriptor_type"] = "DescriptorType::kSampledSampler";
      sampled_images.emplace_back(std::move(value));
    }
  }

  if (auto stage_outputs = ReflectResources(shader_resources.stage_outputs);
      stage_outputs.has_value()) {
    root["stage_outputs"] = std::move(stage_outputs.value());
  } else {
    return std::nullopt;
  }

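  // Struct definitions: vertex stages get a synthesized per-vertex struct,
  // and every interface struct (one whose members all carry layout offset
  // decorations) is reflected exactly once.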
  {
    auto& struct_definitions = root["struct_definitions"] =
        nlohmann::json::array_t{};
    if (entrypoints.front().execution_model ==
            spv::ExecutionModel::ExecutionModelVertex &&
        !shader_resources.stage_inputs.empty()) {
      if (auto struc =
              ReflectPerVertexStructDefinition(shader_resources.stage_inputs);
          struc.has_value()) {
        struct_definitions.emplace_back(EmitStructDefinition(struc.value()));
      } else {
        // If there are stage inputs, it is an error to not generate a per
        // vertex data struct for a vertex like shader stage.
        return std::nullopt;
      }
    }

    std::set<spirv_cross::ID> known_structs;
    ir_->for_each_typed_id<spirv_cross::SPIRType>(
        [&](uint32_t, const spirv_cross::SPIRType& type) {
          if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
            return;
          }
          // Skip structs that do not have layout offset decorations.
          // These structs are used internally within the shader and are not
          // part of the shader's interface.
          for (size_t i = 0; i < type.member_types.size(); i++) {
            if (!compiler_->has_member_decoration(type.self, i,
                                                  spv::DecorationOffset)) {
              return;
            }
          }
          if (known_structs.find(type.self) != known_structs.end()) {
            // Iterating over types this way leads to duplicates which may
            // cause duplicate struct definitions.
            return;
          }
          known_structs.insert(type.self);
          if (auto struc = ReflectStructDefinition(type.self);
              struc.has_value()) {
            struct_definitions.emplace_back(
                EmitStructDefinition(struc.value()));
          }
        });
  }

  root["bind_prototypes"] =
      EmitBindPrototypes(shader_resources, execution_model);

  return root;
}

std::shared_ptr<fml::Mapping> Reflector::GenerateReflectionHeader() const {
  return InflateTemplate(kReflectionHeaderTemplate);
}

std::shared_ptr<fml::Mapping> Reflector::GenerateReflectionCC() const {
  return InflateTemplate(kReflectionCCTemplate);
}

static std::optional<RuntimeStageBackend> GetRuntimeStageBackend(
    TargetPlatform target_platform) {
  switch (target_platform) {
    case TargetPlatform::kUnknown:
    case TargetPlatform::kMetalDesktop:
    case TargetPlatform::kMetalIOS:
    case TargetPlatform::kOpenGLES:
    case TargetPlatform::kOpenGLDesktop:
    case TargetPlatform::kVulkan:
      return std::nullopt;
    case TargetPlatform::kSkSL:
      return RuntimeStageBackend::kSkSL;
    case TargetPlatform::kRuntimeStageMetal:
      return RuntimeStageBackend::kMetal;
    case TargetPlatform::kRuntimeStageGLES:
      return RuntimeStageBackend::kOpenGLES;
    case TargetPlatform::kRuntimeStageVulkan:
      return RuntimeStageBackend::kVulkan;
  }
  FML_UNREACHABLE();
}

std::shared_ptr<RuntimeStageData::Shader> Reflector::GenerateRuntimeStageData()
    const {
  const auto backend = GetRuntimeStageBackend(options_.target_platform);
  if (!backend.has_value()) {
    return nullptr;
  }

  const auto& entrypoints = compiler_->get_entry_points_and_stages();
  if (entrypoints.size() != 1u) {
    VALIDATION_LOG << "Single entrypoint not found.";
    return nullptr;
  }
  auto data = std::make_unique<RuntimeStageData::Shader>();
  data->entrypoint = options_.entry_point_name;
  data->stage = entrypoints.front().execution_model;
  data->shader = shader_data_;
  data->backend = backend.value();

  // Sort the IR so that the uniforms are in declaration order.
  std::vector<spirv_cross::ID> uniforms =
      SortUniforms(ir_.get(), compiler_.GetCompiler());
  for (auto& sorted_id : uniforms) {
    auto var = ir_->ids[sorted_id].get<spirv_cross::SPIRVariable>();
    const auto spir_type = compiler_->get_type(var.basetype);
    UniformDescription uniform_description;
    uniform_description.name = compiler_->get_name(var.self);
    uniform_description.location = compiler_->get_decoration(
        var.self, spv::Decoration::DecorationLocation);
    uniform_description.binding =
        compiler_->get_decoration(var.self, spv::Decoration::DecorationBinding);
    uniform_description.type = spir_type.basetype;
    uniform_description.rows = spir_type.vecsize;
    uniform_description.columns = spir_type.columns;
    uniform_description.bit_width = spir_type.width;
    uniform_description.array_elements = GetArrayElements(spir_type);
    FML_DCHECK(data->backend != RuntimeStageBackend::kVulkan ||
               spir_type.basetype ==
                   spirv_cross::SPIRType::BaseType::SampledImage)
        << "Vulkan runtime effect had unexpected uniforms outside of the "
           "uniform buffer object.";
    data->uniforms.emplace_back(std::move(uniform_description));
  }

  const auto ubos = compiler_->get_shader_resources().uniform_buffers;
  if (data->backend == RuntimeStageBackend::kVulkan && !ubos.empty()) {
    if (ubos.size() != 1 && ubos[0].name != RuntimeStage::kVulkanUBOName) {
      VALIDATION_LOG << "Expected a single UBO resource named '"
                     << RuntimeStage::kVulkanUBOName
                     << "' for Vulkan runtime stage backend.";
      return nullptr;
    }

    const auto& ubo = ubos[0];

    auto members = ReadStructMembers(ubo.type_id);
    std::vector<uint8_t> struct_layout;
    size_t float_count = 0;

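    // struct_layout encodes the UBO as float-sized slots: 0 marks a padding
    // slot and 1 marks a live float slot. float_count tallies only the live
    // slots.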
    for (size_t i = 0; i < members.size(); i += 1) {
      const auto& member = members[i];
      switch (member.underlying_type) {
        case StructMember::UnderlyingType::kPadding: {
          size_t padding_count =
              (member.size + sizeof(float) - 1) / sizeof(float);
          while (padding_count > 0) {
            struct_layout.push_back(0);
            padding_count--;
          }
          break;
        }
        case StructMember::UnderlyingType::kFloat: {
          size_t member_float_count = member.byte_length / sizeof(float);
          float_count += member_float_count;
          while (member_float_count > 0) {
            struct_layout.push_back(1);
            member_float_count--;
          }
          break;
        }
        case StructMember::UnderlyingType::kOther:
          VALIDATION_LOG << "Non-floating-type struct member " << member.name
                         << " is not supported.";
          return nullptr;
      }
    }
    data->uniforms.emplace_back(UniformDescription{
        .name = ubo.name,
        .location = 64,  // Magic constant that must match the descriptor set
                         // location for fragment programs.
        .binding = 64,
        .type = spirv_cross::SPIRType::Struct,
        .struct_layout = std::move(struct_layout),
        .struct_float_count = float_count,
    });
  }

  // We only need to worry about storing vertex attributes.
  if (entrypoints.front().execution_model == spv::ExecutionModelVertex) {
    const auto inputs = compiler_->get_shader_resources().stage_inputs;
    auto input_offsets = ComputeOffsets(inputs);
    for (const auto& input : inputs) {
      std::optional<size_t> offset = GetOffset(input.id, input_offsets);

      const auto type = compiler_->get_type(input.type_id);

      InputDescription input_description;
      input_description.name = input.name;
      input_description.location = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationLocation);
      input_description.set = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationDescriptorSet);
      input_description.binding = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationBinding);
      input_description.type = type.basetype;
      input_description.bit_width = type.width;
      input_description.vec_size = type.vecsize;
      input_description.columns = type.columns;
      input_description.offset = offset.value_or(0u);
      data->inputs.emplace_back(std::move(input_description));
    }
  }

  return data;
}

std::shared_ptr<ShaderBundleData> Reflector::GenerateShaderBundleData() const {
  const auto& entrypoints = compiler_->get_entry_points_and_stages();
  if (entrypoints.size() != 1u) {
    VALIDATION_LOG << "Single entrypoint not found.";
    return nullptr;
  }
  auto data = std::make_shared<ShaderBundleData>(
      options_.entry_point_name,            //
      entrypoints.front().execution_model,  //
      options_.target_platform              //
  );
  data->SetShaderData(shader_data_);

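  // Each uniform buffer is reflected into a ShaderUniformStruct. Synthetic
  // "_PADDING_" members contribute to the struct's size but are not emitted
  // as fields.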
  const auto uniforms = compiler_->get_shader_resources().uniform_buffers;
  for (const auto& uniform : uniforms) {
    ShaderBundleData::ShaderUniformStruct uniform_struct;
    uniform_struct.name = uniform.name;
    uniform_struct.ext_res_0 = compiler_.GetExtendedMSLResourceBinding(
        CompilerBackend::ExtendedResourceIndex::kPrimary, uniform.id);
    uniform_struct.set = compiler_->get_decoration(
        uniform.id, spv::Decoration::DecorationDescriptorSet);
    uniform_struct.binding = compiler_->get_decoration(
        uniform.id, spv::Decoration::DecorationBinding);

    const auto type = compiler_->get_type(uniform.type_id);
    if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
      std::cerr << "Error: Uniform \"" << uniform.name
                << "\" is not a struct. All Flutter GPU shader uniforms must "
                   "be structs."
                << std::endl;
      return nullptr;
    }

    size_t size_in_bytes = 0;
    for (const auto& struct_member : ReadStructMembers(uniform.type_id)) {
      size_in_bytes += struct_member.byte_length;
      if (StringStartsWith(struct_member.name, "_PADDING_")) {
        continue;
      }
      ShaderBundleData::ShaderUniformStructField uniform_struct_field;
      uniform_struct_field.name = struct_member.name;
      uniform_struct_field.type = struct_member.base_type;
      uniform_struct_field.offset_in_bytes = struct_member.offset;
      uniform_struct_field.element_size_in_bytes = struct_member.size;
      uniform_struct_field.total_size_in_bytes = struct_member.byte_length;
      uniform_struct_field.array_elements = struct_member.array_elements;
      uniform_struct.fields.push_back(uniform_struct_field);
    }
    uniform_struct.size_in_bytes = size_in_bytes;

    data->AddUniformStruct(uniform_struct);
  }

  const auto sampled_images = compiler_->get_shader_resources().sampled_images;
  for (const auto& image : sampled_images) {
    ShaderBundleData::ShaderUniformTexture uniform_texture;
    uniform_texture.name = image.name;
    uniform_texture.ext_res_0 = compiler_.GetExtendedMSLResourceBinding(
        CompilerBackend::ExtendedResourceIndex::kPrimary, image.id);
    uniform_texture.set = compiler_->get_decoration(
        image.id, spv::Decoration::DecorationDescriptorSet);
    uniform_texture.binding =
        compiler_->get_decoration(image.id, spv::Decoration::DecorationBinding);
    data->AddUniformTexture(uniform_texture);
  }

  // We only need to worry about storing vertex attributes.
  if (entrypoints.front().execution_model == spv::ExecutionModelVertex) {
    const auto inputs = compiler_->get_shader_resources().stage_inputs;
    auto input_offsets = ComputeOffsets(inputs);
    for (const auto& input : inputs) {
      std::optional<size_t> offset = GetOffset(input.id, input_offsets);

      const auto type = compiler_->get_type(input.type_id);

      InputDescription input_description;
      input_description.name = input.name;
      input_description.location = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationLocation);
      input_description.set = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationDescriptorSet);
      input_description.binding = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationBinding);
      input_description.type = type.basetype;
      input_description.bit_width = type.width;
      input_description.vec_size = type.vecsize;
      input_description.columns = type.columns;
      input_description.offset = offset.value_or(0u);
      data->AddInputDescription(std::move(input_description));
    }
  }

  return data;
}

std::optional<uint32_t> Reflector::GetArrayElements(
    const spirv_cross::SPIRType& type) const {
  if (type.array.empty()) {
    return std::nullopt;
  }
  FML_CHECK(type.array.size() == 1)
      << "Multi-dimensional arrays are not supported.";
  FML_CHECK(type.array_size_literal.front())
      << "Must use a literal for array sizes.";
  return type.array.front();
}

static std::string ToString(CompilerBackend::Type type) {
  switch (type) {
    case CompilerBackend::Type::kMSL:
      return "Metal Shading Language";
    case CompilerBackend::Type::kGLSL:
      return "OpenGL Shading Language";
    case CompilerBackend::Type::kGLSLVulkan:
      return "OpenGL Shading Language (Relaxed Vulkan Semantics)";
    case CompilerBackend::Type::kSkSL:
      return "SkSL Shading Language";
  }
  FML_UNREACHABLE();
}

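// Renders one of the code-gen templates against the previously generated
// template arguments. The registered inja callbacks (camel_case,
// to_shader_stage, get_generator_name) are available inside the templates.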
std::shared_ptr<fml::Mapping> Reflector::InflateTemplate(
    std::string_view tmpl) const {
  inja::Environment env;
  env.set_trim_blocks(true);
  env.set_lstrip_blocks(true);

  env.add_callback("camel_case", 1u, [](inja::Arguments& args) {
    return ToCamelCase(args.at(0u)->get<std::string>());
  });

  env.add_callback("to_shader_stage", 1u, [](inja::Arguments& args) {
    return StringToShaderStage(args.at(0u)->get<std::string>());
  });

  env.add_callback("get_generator_name", 0u,
                   [type = compiler_.GetType()](inja::Arguments& args) {
                     return ToString(type);
                   });

  auto inflated_template =
      std::make_shared<std::string>(env.render(tmpl, *template_arguments_));

  return std::make_shared<fml::NonOwnedMapping>(
      reinterpret_cast<const uint8_t*>(inflated_template->data()),
      inflated_template->size(), [inflated_template](auto, auto) {});
}

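// Computes the byte offset of each stage input, indexed by location. Each
// slot first receives the size of the attribute at that location; a prefix
// sum turns the sizes into end offsets, which are then shifted down by one
// slot so that offsets[location] is the start offset of that input.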
std::vector<size_t> Reflector::ComputeOffsets(
    const spirv_cross::SmallVector<spirv_cross::Resource>& resources) const {
  std::vector<size_t> offsets(resources.size(), 0);
  if (resources.size() == 0) {
    return offsets;
  }
  for (const auto& resource : resources) {
    const auto type = compiler_->get_type(resource.type_id);
    auto location = compiler_->get_decoration(
        resource.id, spv::Decoration::DecorationLocation);
    // Malformed shader, will be caught later on.
    if (location >= resources.size()) {
      location = 0;
    }
    offsets[location] = (type.width * type.vecsize) / 8;
  }
  for (size_t i = 1; i < resources.size(); i++) {
    offsets[i] += offsets[i - 1];
  }
  for (size_t i = resources.size() - 1; i > 0; i--) {
    offsets[i] = offsets[i - 1];
  }
  offsets[0] = 0;

  return offsets;
}

std::optional<size_t> Reflector::GetOffset(
    spirv_cross::ID id,
    const std::vector<size_t>& offsets) const {
  uint32_t location =
      compiler_->get_decoration(id, spv::Decoration::DecorationLocation);
  if (location >= offsets.size()) {
    return std::nullopt;
  }
  return offsets[location];
}

std::optional<nlohmann::json::object_t> Reflector::ReflectResource(
    const spirv_cross::Resource& resource,
    std::optional<size_t> offset) const {
  nlohmann::json::object_t result;

  result["name"] = resource.name;
  result["descriptor_set"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationDescriptorSet);
  result["binding"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationBinding);
  result["set"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationDescriptorSet);
  result["location"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationLocation);
  result["index"] =
      compiler_->get_decoration(resource.id, spv::Decoration::DecorationIndex);
  result["ext_res_0"] = compiler_.GetExtendedMSLResourceBinding(
      CompilerBackend::ExtendedResourceIndex::kPrimary, resource.id);
  result["ext_res_1"] = compiler_.GetExtendedMSLResourceBinding(
      CompilerBackend::ExtendedResourceIndex::kSecondary, resource.id);
  auto type = ReflectType(resource.type_id);
  if (!type.has_value()) {
    return std::nullopt;
  }
  result["type"] = std::move(type.value());
  result["offset"] = offset.value_or(0u);
  return result;
}

std::optional<nlohmann::json::object_t> Reflector::ReflectType(
    const spirv_cross::TypeID& type_id) const {
  nlohmann::json::object_t result;

  const auto type = compiler_->get_type(type_id);

  result["type_name"] = StructMember::BaseTypeToString(type.basetype);
  result["bit_width"] = type.width;
  result["vec_size"] = type.vecsize;
  result["columns"] = type.columns;
  auto& members = result["members"] = nlohmann::json::array_t{};
  if (type.basetype == spirv_cross::SPIRType::BaseType::Struct) {
    for (const auto& struct_member : ReadStructMembers(type_id)) {
      auto member = nlohmann::json::object_t{};
      member["name"] = struct_member.name;
      member["type"] = struct_member.type;
      member["base_type"] =
          StructMember::BaseTypeToString(struct_member.base_type);
      member["offset"] = struct_member.offset;
      member["size"] = struct_member.size;
      member["byte_length"] = struct_member.byte_length;
      if (struct_member.array_elements.has_value()) {
        member["array_elements"] = struct_member.array_elements.value();
      } else {
        member["array_elements"] = "std::nullopt";
      }
      members.emplace_back(std::move(member));
    }
  }

  return result;
}

std::optional<nlohmann::json::array_t> Reflector::ReflectResources(
    const spirv_cross::SmallVector<spirv_cross::Resource>& resources,
    bool compute_offsets) const {
  nlohmann::json::array_t result;
  result.reserve(resources.size());
  std::vector<size_t> offsets;
  if (compute_offsets) {
    offsets = ComputeOffsets(resources);
  }
  for (const auto& resource : resources) {
    std::optional<size_t> maybe_offset = std::nullopt;
    if (compute_offsets) {
      maybe_offset = GetOffset(resource.id, offsets);
    }
    if (auto reflected = ReflectResource(resource, maybe_offset);
        reflected.has_value()) {
      result.emplace_back(std::move(reflected.value()));
    } else {
      return std::nullopt;
    }
  }
  return result;
}

static std::string TypeNameWithPaddingOfSize(size_t size) {
  std::stringstream stream;
  stream << "Padding<" << size << ">";
  return stream.str();
}

struct KnownType {
  std::string name;
  size_t byte_size = 0;
};

static std::optional<KnownType> ReadKnownScalarType(
    spirv_cross::SPIRType::BaseType type) {
  switch (type) {
    case spirv_cross::SPIRType::BaseType::Boolean:
      return KnownType{
          .name = "bool",
          .byte_size = sizeof(bool),
      };
    case spirv_cross::SPIRType::BaseType::Float:
      return KnownType{
          .name = "Scalar",
          .byte_size = sizeof(Scalar),
      };
    case spirv_cross::SPIRType::BaseType::Half:
      return KnownType{
          .name = "Half",
          .byte_size = sizeof(Half),
      };
    case spirv_cross::SPIRType::BaseType::UInt:
      return KnownType{
          .name = "uint32_t",
          .byte_size = sizeof(uint32_t),
      };
    case spirv_cross::SPIRType::BaseType::Int:
      return KnownType{
          .name = "int32_t",
          .byte_size = sizeof(int32_t),
      };
    default:
      break;
  }
  return std::nullopt;
}

//------------------------------------------------------------------------------
/// @brief      Get the reflected struct size. In the vast majority of the
///             cases, this is the same as the declared struct size as given by
///             the compiler. But, additional padding may need to be introduced
///             after the end of the struct to keep in line with the alignment
///             requirement of the individual struct members. This method
///             figures out the actual size of the reflected struct that can be
///             referenced in native code.
///
/// @param[in]  members  The members
///
/// @return     The reflected structure size.
///
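///             For example, members reflected as {Vector3 (12 bytes),
///             Padding<4> (4 bytes)} sum to a reflected size of 16 bytes,
///             matching the struct's 16-byte alignment requirement.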
static size_t GetReflectedStructSize(const std::vector<StructMember>& members) {
  auto struct_size = 0u;
  for (const auto& member : members) {
    struct_size += member.byte_length;
  }
  return struct_size;
}

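// Walks a struct's members in declaration order and reconstructs a matching
// C++ layout, inserting explicit "_PADDING_" members wherever the SPIR-V
// offset decorations leave gaps that native code must skip over.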
std::vector<StructMember> Reflector::ReadStructMembers(
    const spirv_cross::TypeID& type_id) const {
  const auto& struct_type = compiler_->get_type(type_id);
  FML_CHECK(struct_type.basetype == spirv_cross::SPIRType::BaseType::Struct);

  std::vector<StructMember> result;

  size_t current_byte_offset = 0;
  size_t max_member_alignment = 0;

  for (size_t i = 0; i < struct_type.member_types.size(); i++) {
    const auto& member = compiler_->get_type(struct_type.member_types[i]);
    const auto struct_member_offset =
        compiler_->type_struct_member_offset(struct_type, i);
    auto array_elements = GetArrayElements(member);

    if (struct_member_offset > current_byte_offset) {
      const auto alignment_pad = struct_member_offset - current_byte_offset;
      result.emplace_back(StructMember{
          TypeNameWithPaddingOfSize(alignment_pad),  // type
          spirv_cross::SPIRType::BaseType::Void,     // basetype
          SPrintF("_PADDING_%s_",
                  GetMemberNameAtIndex(struct_type, i).c_str()),  // name
          current_byte_offset,                                    // offset
          alignment_pad,                                          // size
          alignment_pad,  // byte_length
          std::nullopt,   // array_elements
          0,              // element_padding
      });
      current_byte_offset += alignment_pad;
    }

    max_member_alignment =
        std::max<size_t>(max_member_alignment,
                         (member.width / 8) * member.columns * member.vecsize);

    FML_CHECK(current_byte_offset == struct_member_offset);

    // A user defined struct.
    if (member.basetype == spirv_cross::SPIRType::BaseType::Struct) {
      const size_t size =
          GetReflectedStructSize(ReadStructMembers(member.self));
      uint32_t stride = GetArrayStride<0>(struct_type, member, i);
      if (stride == 0) {
        stride = size;
      }
      uint32_t element_padding = stride - size;
      result.emplace_back(StructMember{
          compiler_->get_name(member.self),      // type
          member.basetype,                       // basetype
          GetMemberNameAtIndex(struct_type, i),  // name
          struct_member_offset,                  // offset
          size,                                  // size
          stride * array_elements.value_or(1),   // byte_length
          array_elements,                        // array_elements
          element_padding,                       // element_padding
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed 4x4 Matrix is special cased as we know how to work with
    // those.
    if (member.basetype == spirv_cross::SPIRType::BaseType::Float &&  //
        member.width == sizeof(Scalar) * 8 &&                         //
        member.columns == 4 &&                                        //
        member.vecsize == 4                                           //
    ) {
      uint32_t stride = GetArrayStride<sizeof(Matrix)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(Matrix);
      result.emplace_back(StructMember{
          "Matrix",                              // type
          member.basetype,                       // basetype
          GetMemberNameAtIndex(struct_type, i),  // name
          struct_member_offset,                  // offset
          sizeof(Matrix),                        // size
          stride * array_elements.value_or(1),   // byte_length
          array_elements,                        // array_elements
          element_padding,                       // element_padding
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed UintPoint32 (uvec2).
    if (member.basetype == spirv_cross::SPIRType::BaseType::UInt &&  //
        member.width == sizeof(uint32_t) * 8 &&                      //
        member.columns == 1 &&                                       //
        member.vecsize == 2                                          //
    ) {
      uint32_t stride =
          GetArrayStride<sizeof(UintPoint32)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(UintPoint32);
      result.emplace_back(StructMember{
          "UintPoint32",                         // type
          member.basetype,                       // basetype
          GetMemberNameAtIndex(struct_type, i),  // name
          struct_member_offset,                  // offset
          sizeof(UintPoint32),                   // size
          stride * array_elements.value_or(1),   // byte_length
          array_elements,                        // array_elements
          element_padding,                       // element_padding
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed IPoint32 (ivec2).
    if (member.basetype == spirv_cross::SPIRType::BaseType::Int &&  //
        member.width == sizeof(int32_t) * 8 &&                      //
        member.columns == 1 &&                                      //
        member.vecsize == 2                                         //
    ) {
      uint32_t stride =
          GetArrayStride<sizeof(IPoint32)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(IPoint32);
      result.emplace_back(StructMember{
          "IPoint32",                            // type
          member.basetype,                       // basetype
          GetMemberNameAtIndex(struct_type, i),  // name
          struct_member_offset,                  // offset
          sizeof(IPoint32),                      // size
          stride * array_elements.value_or(1),   // byte_length
          array_elements,                        // array_elements
          element_padding,                       // element_padding
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed Point (vec2).
    if (member.basetype == spirv_cross::SPIRType::BaseType::Float &&  //
        member.width == sizeof(float) * 8 &&                          //
        member.columns == 1 &&                                        //
        member.vecsize == 2                                           //
    ) {
      uint32_t stride = GetArrayStride<sizeof(Point)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(Point);
      result.emplace_back(StructMember{
          "Point",                               // type
          member.basetype,                       // basetype
          GetMemberNameAtIndex(struct_type, i),  // name
          struct_member_offset,                  // offset
          sizeof(Point),                         // size
          stride * array_elements.value_or(1),   // byte_length
          array_elements,                        // array_elements
          element_padding,                       // element_padding
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed Vector3.
    if (member.basetype == spirv_cross::SPIRType::BaseType::Float &&  //
        member.width == sizeof(float) * 8 &&                          //
        member.columns == 1 &&                                        //
        member.vecsize == 3                                           //
    ) {
      uint32_t stride = GetArrayStride<sizeof(Vector3)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(Vector3);
      result.emplace_back(StructMember{
          "Vector3",                             // type
          member.basetype,                       // basetype
          GetMemberNameAtIndex(struct_type, i),  // name
          struct_member_offset,                  // offset
          sizeof(Vector3),                       // size
          stride * array_elements.value_or(1),   // byte_length
          array_elements,                        // array_elements
          element_padding,                       // element_padding
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed Vector4.
    if (member.basetype == spirv_cross::SPIRType::BaseType::Float &&  //
        member.width == sizeof(float) * 8 &&                          //
        member.columns == 1 &&                                        //
        member.vecsize == 4                                           //
    ) {
      uint32_t stride = GetArrayStride<sizeof(Vector4)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(Vector4);
      result.emplace_back(StructMember{
          "Vector4",                             // type
          member.basetype,                       // basetype
          GetMemberNameAtIndex(struct_type, i),  // name
          struct_member_offset,                  // offset
          sizeof(Vector4),                       // size
          stride * array_elements.value_or(1),   // byte_length
          array_elements,                        // array_elements
          element_padding,                       // element_padding
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed half Point (vec2).
    if (member.basetype == spirv_cross::SPIRType::BaseType::Half &&  //
        member.width == sizeof(Half) * 8 &&                          //
        member.columns == 1 &&                                       //
        member.vecsize == 2                                          //
    ) {
      uint32_t stride =
          GetArrayStride<sizeof(HalfVector2)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(HalfVector2);
      result.emplace_back(StructMember{
          "HalfVector2",                         // type
          member.basetype,                       // basetype
          GetMemberNameAtIndex(struct_type, i),  // name
          struct_member_offset,                  // offset
          sizeof(HalfVector2),                   // size
          stride * array_elements.value_or(1),   // byte_length
          array_elements,                        // array_elements
          element_padding,                       // element_padding
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed Half Float Vector3.
    if (member.basetype == spirv_cross::SPIRType::BaseType::Half &&  //
        member.width == sizeof(Half) * 8 &&                          //
        member.columns == 1 &&                                       //
        member.vecsize == 3                                          //
    ) {
      uint32_t stride =
          GetArrayStride<sizeof(HalfVector3)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(HalfVector3);
      result.emplace_back(StructMember{
          "HalfVector3",                         // type
          member.basetype,                       // basetype
          GetMemberNameAtIndex(struct_type, i),  // name
          struct_member_offset,                  // offset
          sizeof(HalfVector3),                   // size
          stride * array_elements.value_or(1),   // byte_length
          array_elements,                        // array_elements
          element_padding,                       // element_padding
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Tightly packed Half Float Vector4.
    if (member.basetype == spirv_cross::SPIRType::BaseType::Half &&  //
        member.width == sizeof(Half) * 8 &&                          //
        member.columns == 1 &&                                       //
        member.vecsize == 4                                          //
    ) {
      uint32_t stride =
          GetArrayStride<sizeof(HalfVector4)>(struct_type, member, i);
      uint32_t element_padding = stride - sizeof(HalfVector4);
      result.emplace_back(StructMember{
          "HalfVector4",                         // type
          member.basetype,                       // basetype
          GetMemberNameAtIndex(struct_type, i),  // name
          struct_member_offset,                  // offset
          sizeof(HalfVector4),                   // size
          stride * array_elements.value_or(1),   // byte_length
          array_elements,                        // array_elements
          element_padding,                       // element_padding
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }

    // Other isolated scalars (like bool, int, float/Scalar, etc..).
    {
      auto maybe_known_type = ReadKnownScalarType(member.basetype);
      if (maybe_known_type.has_value() &&  //
          member.columns == 1 &&           //
          member.vecsize == 1              //
      ) {
        uint32_t stride = GetArrayStride<0>(struct_type, member, i);
        if (stride == 0) {
          stride = maybe_known_type.value().byte_size;
        }
        uint32_t element_padding = stride - maybe_known_type.value().byte_size;
        // Add the type directly.
        result.emplace_back(StructMember{
            maybe_known_type.value().name,         // type
            member.basetype,                       // basetype
            GetMemberNameAtIndex(struct_type, i),  // name
            struct_member_offset,                  // offset
            maybe_known_type.value().byte_size,    // size
            stride * array_elements.value_or(1),   // byte_length
            array_elements,                        // array_elements
            element_padding,                       // element_padding
        });
        current_byte_offset += stride * array_elements.value_or(1);
        continue;
      }
    }

    // Catch all for unknown types. Just add the necessary padding to the
    // struct and move on.
    {
      const size_t size = (member.width * member.columns * member.vecsize) / 8u;
      uint32_t stride = GetArrayStride<0>(struct_type, member, i);
      if (stride == 0) {
        stride = size;
      }
      auto element_padding = stride - size;
      result.emplace_back(StructMember{
          TypeNameWithPaddingOfSize(size),       // type
          member.basetype,                       // basetype
          GetMemberNameAtIndex(struct_type, i),  // name
          struct_member_offset,                  // offset
          size,                                  // size
          stride * array_elements.value_or(1),   // byte_length
          array_elements,                        // array_elements
          element_padding,                       // element_padding
      });
      current_byte_offset += stride * array_elements.value_or(1);
      continue;
    }
  }

  if (max_member_alignment > 0u) {
    const auto struct_length = current_byte_offset;
    {
      const auto excess = struct_length % max_member_alignment;
      if (excess != 0) {
        const auto padding = max_member_alignment - excess;
        result.emplace_back(StructMember{
            TypeNameWithPaddingOfSize(padding),     // type
            spirv_cross::SPIRType::BaseType::Void,  // basetype
            "_PADDING_",                            // name
            current_byte_offset,                    // offset
            padding,                                // size
            padding,                                // byte_length
            std::nullopt,                           // array_elements
            0,                                      // element_padding
        });
      }
    }
  }

  return result;
}

std::optional<Reflector::StructDefinition> Reflector::ReflectStructDefinition(
    const spirv_cross::TypeID& type_id) const {
  const auto& type = compiler_->get_type(type_id);
  if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
    return std::nullopt;
  }

  const auto struct_name = compiler_->get_name(type_id);
  if (struct_name.find("_RESERVED_IDENTIFIER_") != std::string::npos) {
    return std::nullopt;
  }

  auto struct_members = ReadStructMembers(type_id);
  auto reflected_struct_size = GetReflectedStructSize(struct_members);

  StructDefinition struc;
  struc.name = struct_name;
  struc.byte_length = reflected_struct_size;
  struc.members = std::move(struct_members);
  return struc;
}

nlohmann::json::object_t Reflector::EmitStructDefinition(
    std::optional<Reflector::StructDefinition> struc) const {
  nlohmann::json::object_t result;
  result["name"] = struc->name;
  result["byte_length"] = struc->byte_length;
  auto& members = result["members"] = nlohmann::json::array_t{};
  for (const auto& struct_member : struc->members) {
    auto& member = members.emplace_back(nlohmann::json::object_t{});
    member["name"] = struct_member.name;
    member["type"] = struct_member.type;
    member["base_type"] =
        StructMember::BaseTypeToString(struct_member.base_type);
    member["offset"] = struct_member.offset;
    member["byte_length"] = struct_member.byte_length;
    if (struct_member.array_elements.has_value()) {
      member["array_elements"] = struct_member.array_elements.value();
    } else {
      member["array_elements"] = "std::nullopt";
    }
    member["element_padding"] = struct_member.element_padding;
  }
  return result;
}

struct VertexType {
  std::string type_name;
  spirv_cross::SPIRType::BaseType base_type;
  std::string variable_name;
  size_t byte_length = 0u;
};

static VertexType VertexTypeFromInputResource(
    const spirv_cross::Compiler& compiler,
    const spirv_cross::Resource* resource) {
  VertexType result;
  result.variable_name = resource->name;
  const auto& type = compiler.get_type(resource->type_id);
  result.base_type = type.basetype;
  const auto total_size = type.columns * type.vecsize * type.width / 8u;
  result.byte_length = total_size;

  if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
      type.columns == 1u && type.vecsize == 2u &&
      type.width == sizeof(float) * 8u) {
    result.type_name = "Point";
  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
             type.columns == 1u && type.vecsize == 4u &&
             type.width == sizeof(float) * 8u) {
    result.type_name = "Vector4";
  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
             type.columns == 1u && type.vecsize == 3u &&
             type.width == sizeof(float) * 8u) {
    result.type_name = "Vector3";
  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
             type.columns == 1u && type.vecsize == 1u &&
             type.width == sizeof(float) * 8u) {
    result.type_name = "Scalar";
  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Int &&
             type.columns == 1u && type.vecsize == 1u &&
             type.width == sizeof(int32_t) * 8u) {
    result.type_name = "int32_t";
  } else {
    // Catch all unknown padding.
    result.type_name = TypeNameWithPaddingOfSize(total_size);
  }

  return result;
}

std::optional<Reflector::StructDefinition>
Reflector::ReflectPerVertexStructDefinition(
    const spirv_cross::SmallVector<spirv_cross::Resource>& stage_inputs) const {
  // Avoid emitting a zero sized structure. The code gen templates assume a
  // non-zero size.
  if (stage_inputs.empty()) {
    return std::nullopt;
  }

  // Validate locations are contiguous and there are no duplicates.
  std::set<uint32_t> locations;
  for (const auto& input : stage_inputs) {
    auto location = compiler_->get_decoration(
        input.id, spv::Decoration::DecorationLocation);
    if (locations.count(location) != 0) {
      // Duplicate location. Bail.
      return std::nullopt;
    }
    locations.insert(location);
  }

  for (size_t i = 0; i < locations.size(); i++) {
    if (locations.count(i) != 1) {
      // Locations are not contiguous. This usually happens when a single stage
      // input takes multiple input slots. No reflection information can be
      // generated for such cases anyway. So bail! It is up to the shader
      // author to make sure one stage input maps to a single input slot.
      return std::nullopt;
    }
  }

  auto input_for_location =
      [&](uint32_t queried_location) -> const spirv_cross::Resource* {
    for (const auto& input : stage_inputs) {
      auto location = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationLocation);
      if (location == queried_location) {
        return &input;
      }
    }
    // This really cannot happen with all the validation above.
    FML_UNREACHABLE();
    return nullptr;
  };

  StructDefinition struc;
  struc.name = "PerVertexData";
  struc.byte_length = 0u;
  for (size_t i = 0; i < locations.size(); i++) {
    auto resource = input_for_location(i);
    if (resource == nullptr) {
      return std::nullopt;
    }
    const auto vertex_type =
        VertexTypeFromInputResource(*compiler_.GetCompiler(), resource);

    auto member = StructMember{
        vertex_type.type_name,      // type
        vertex_type.base_type,      // base type
        vertex_type.variable_name,  // name
        struc.byte_length,          // offset
        vertex_type.byte_length,    // size
        vertex_type.byte_length,    // byte_length
        std::nullopt,               // array_elements
        0,                          // element_padding
    };
    struc.byte_length += vertex_type.byte_length;
    struc.members.emplace_back(std::move(member));
  }
  return struc;
}

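// Member names live on the canonical (non-aliased) type, so the lookup first
// chases type_alias before consulting the IR metadata. Unnamed members get a
// unique, monotonically increasing fallback name.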
std::optional<std::string> Reflector::GetMemberNameAtIndexIfExists(
    const spirv_cross::SPIRType& parent_type,
    size_t index) const {
  if (parent_type.type_alias != 0) {
    return GetMemberNameAtIndexIfExists(
        compiler_->get_type(parent_type.type_alias), index);
  }

  if (auto found = ir_->meta.find(parent_type.self); found != ir_->meta.end()) {
    const auto& members = found->second.members;
    if (index < members.size() && !members[index].alias.empty()) {
      return members[index].alias;
    }
  }
  return std::nullopt;
}

std::string Reflector::GetMemberNameAtIndex(
    const spirv_cross::SPIRType& parent_type,
    size_t index,
    std::string suffix) const {
  if (auto name = GetMemberNameAtIndexIfExists(parent_type, index);
      name.has_value()) {
    return name.value();
  }
  static std::atomic_size_t sUnnamedMembersID;
  std::stringstream stream;
  stream << "unnamed_" << sUnnamedMembersID++ << suffix;
  return stream.str();
}

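// Produces one BindPrototype per shader resource. These drive the generated
// Bind* convenience functions emitted by the code-gen templates.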
std::vector<Reflector::BindPrototype> Reflector::ReflectBindPrototypes(
    const spirv_cross::ShaderResources& resources,
    spv::ExecutionModel execution_model) const {
  std::vector<BindPrototype> prototypes;
  for (const auto& uniform_buffer : resources.uniform_buffers) {
    auto& proto = prototypes.emplace_back(BindPrototype{});
    proto.return_type = "bool";
    proto.name = ToCamelCase(uniform_buffer.name);
    proto.descriptor_type = "DescriptorType::kUniformBuffer";
    {
      std::stringstream stream;
      stream << "Bind uniform buffer for resource named " << uniform_buffer.name
             << ".";
      proto.docstring = stream.str();
    }
    proto.args.push_back(BindPrototypeArgument{
        .type_name = "ResourceBinder&",
        .argument_name = "command",
    });
    proto.args.push_back(BindPrototypeArgument{
        .type_name = "BufferView",
        .argument_name = "view",
    });
  }
  for (const auto& storage_buffer : resources.storage_buffers) {
    auto& proto = prototypes.emplace_back(BindPrototype{});
    proto.return_type = "bool";
    proto.name = ToCamelCase(storage_buffer.name);
    proto.descriptor_type = "DescriptorType::kStorageBuffer";
    {
      std::stringstream stream;
      stream << "Bind storage buffer for resource named " << storage_buffer.name
             << ".";
      proto.docstring = stream.str();
    }
    proto.args.push_back(BindPrototypeArgument{
        .type_name = "ResourceBinder&",
        .argument_name = "command",
    });
    proto.args.push_back(BindPrototypeArgument{
        .type_name = "BufferView",
        .argument_name = "view",
    });
  }
  for (const auto& sampled_image : resources.sampled_images) {
    auto& proto = prototypes.emplace_back(BindPrototype{});
    proto.return_type = "bool";
    proto.name = ToCamelCase(sampled_image.name);
    proto.descriptor_type = "DescriptorType::kSampledImage";
    {
      std::stringstream stream;
      stream << "Bind combined image sampler for resource named "
             << sampled_image.name << ".";
      proto.docstring = stream.str();
    }
    proto.args.push_back(BindPrototypeArgument{
        .type_name = "ResourceBinder&",
        .argument_name = "command",
    });
    proto.args.push_back(BindPrototypeArgument{
        .type_name = "std::shared_ptr<const Texture>",
        .argument_name = "texture",
    });
    proto.args.push_back(BindPrototypeArgument{
        .type_name = "const std::unique_ptr<const Sampler>&",
        .argument_name = "sampler",
    });
  }
  for (const auto& separate_image : resources.separate_images) {
    auto& proto = prototypes.emplace_back(BindPrototype{});
    proto.return_type = "bool";
    proto.name = ToCamelCase(separate_image.name);
    proto.descriptor_type = "DescriptorType::kImage";
    {
      std::stringstream stream;
      stream << "Bind separate image for resource named " << separate_image.name
             << ".";
      proto.docstring = stream.str();
    }
    proto.args.push_back(BindPrototypeArgument{
        .type_name = "Command&",
        .argument_name = "command",
    });
    proto.args.push_back(BindPrototypeArgument{
        .type_name = "std::shared_ptr<const Texture>",
        .argument_name = "texture",
    });
  }
  for (const auto& separate_sampler : resources.separate_samplers) {
    auto& proto = prototypes.emplace_back(BindPrototype{});
    proto.return_type = "bool";
    proto.name = ToCamelCase(separate_sampler.name);
    proto.descriptor_type = "DescriptorType::kSampler";
    {
      std::stringstream stream;
      stream << "Bind separate sampler for resource named "
             << separate_sampler.name << ".";
      proto.docstring = stream.str();
    }
    proto.args.push_back(BindPrototypeArgument{
        .type_name = "Command&",
        .argument_name = "command",
    });
    proto.args.push_back(BindPrototypeArgument{
        .type_name = "std::shared_ptr<const Sampler>",
        .argument_name = "sampler",
    });
  }
  return prototypes;
}

nlohmann::json::array_t Reflector::EmitBindPrototypes(
    const spirv_cross::ShaderResources& resources,
    spv::ExecutionModel execution_model) const {
  const auto prototypes = ReflectBindPrototypes(resources, execution_model);
  nlohmann::json::array_t result;
  for (const auto& res : prototypes) {
    auto& item = result.emplace_back(nlohmann::json::object_t{});
    item["return_type"] = res.return_type;
    item["name"] = res.name;
    item["docstring"] = res.docstring;
    item["descriptor_type"] = res.descriptor_type;
    auto& args = item["args"] = nlohmann::json::array_t{};
    for (const auto& arg : res.args) {
      auto& json_arg = args.emplace_back(nlohmann::json::object_t{});
      json_arg["type_name"] = arg.type_name;
      json_arg["argument_name"] = arg.argument_name;
    }
  }
  return result;
}

}  // namespace compiler
}  // namespace impeller