Hoist out parsing module from spirv_cross::Compiler.

This is a large refactor which splits the SPIR-V parser out of
Compiler and moves it into a more appropriately named Parser module.

The Parser is responsible for building a ParsedIR structure which is
then consumed by one or more compilers.

Compiler can take a ParsedIR either by const reference (copying it) or by
rvalue reference (moving it). This should allow for the optimal case in both
the multiple-compilation and single-compilation scenarios.
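
As a rough usage sketch of the new flow (mirroring the main.cpp changes below; spirv_words is an assumed std::vector<uint32_t> of SPIR-V words, remaining includes and error handling omitted):

#include "spirv_glsl.hpp"
#include "spirv_parser.hpp"

using namespace spirv_cross;

// Parse the module once.
Parser parser(std::move(spirv_words));
parser.parse();

// Hand the ParsedIR to a compiler; moving it is the optimal path for a single compilation.
CompilerGLSL glsl(std::move(parser.get_parsed_ir()));
std::string source = glsl.compile();
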
Hans-Kristian Arntzen 2018-10-05 11:30:57 +02:00
parent cc5c0204d8
commit 5bcf02f7c9
21 changed files with 2436 additions and 1749 deletions


@ -82,6 +82,10 @@ spirv_cross_add_library(spirv-cross-core spirv_cross_core STATIC
${CMAKE_CURRENT_SOURCE_DIR}/spirv.hpp
${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross.hpp
${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross.cpp
${CMAKE_CURRENT_SOURCE_DIR}/spirv_parser.hpp
${CMAKE_CURRENT_SOURCE_DIR}/spirv_parser.cpp
${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross_parsed_ir.hpp
${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross_parsed_ir.cpp
${CMAKE_CURRENT_SOURCE_DIR}/spirv_cfg.hpp
${CMAKE_CURRENT_SOURCE_DIR}/spirv_cfg.cpp)


@ -19,6 +19,7 @@
#include "spirv_glsl.hpp"
#include "spirv_hlsl.hpp"
#include "spirv_msl.hpp"
#include "spirv_parser.hpp"
#include "spirv_reflect.hpp"
#include <algorithm>
#include <cstdio>
@ -190,7 +191,7 @@ static vector<uint32_t> read_spirv_file(const char *path)
FILE *file = fopen(path, "rb");
if (!file)
{
fprintf(stderr, "Failed to open SPIRV file: %s\n", path);
fprintf(stderr, "Failed to open SPIR-V file: %s\n", path);
return {};
}
@ -797,10 +798,17 @@ static int main_inner(int argc, char *argv[])
return EXIT_FAILURE;
}
auto spirv_file = read_spirv_file(args.input);
if (spirv_file.empty())
return EXIT_FAILURE;
Parser spirv_parser(move(spirv_file));
spirv_parser.parse();
// Special case reflection because it has little to do with the path followed by code-outputting compilers
if (!args.reflect.empty())
{
CompilerReflection compiler(read_spirv_file(args.input));
CompilerReflection compiler(move(spirv_parser.get_parsed_ir()));
compiler.set_format(args.reflect);
auto json = compiler.compile();
if (args.output)
@ -816,13 +824,13 @@ static int main_inner(int argc, char *argv[])
if (args.cpp)
{
compiler = unique_ptr<CompilerGLSL>(new CompilerCPP(read_spirv_file(args.input)));
compiler.reset(new CompilerCPP(move(spirv_parser.get_parsed_ir())));
if (args.cpp_interface_name)
static_cast<CompilerCPP *>(compiler.get())->set_interface_name(args.cpp_interface_name);
}
else if (args.msl)
{
compiler = unique_ptr<CompilerMSL>(new CompilerMSL(read_spirv_file(args.input)));
compiler.reset(new CompilerMSL(move(spirv_parser.get_parsed_ir())));
auto *msl_comp = static_cast<CompilerMSL *>(compiler.get());
auto msl_opts = msl_comp->get_msl_options();
@ -834,13 +842,13 @@ static int main_inner(int argc, char *argv[])
msl_comp->set_msl_options(msl_opts);
}
else if (args.hlsl)
compiler = unique_ptr<CompilerHLSL>(new CompilerHLSL(read_spirv_file(args.input)));
compiler.reset(new CompilerHLSL(move(spirv_parser.get_parsed_ir())));
else
{
combined_image_samplers = !args.vulkan_semantics;
if (!args.vulkan_semantics)
build_dummy_sampler = true;
compiler = unique_ptr<CompilerGLSL>(new CompilerGLSL(read_spirv_file(args.input)));
compiler.reset(new CompilerGLSL(move(spirv_parser.get_parsed_ir())));
}
if (!args.variable_type_remaps.empty())


@ -131,6 +131,8 @@
<ClCompile Include="..\spirv_hlsl.cpp" />
<ClCompile Include="..\spirv_msl.cpp" />
<ClCompile Include="..\spirv_cfg.cpp" />
<ClCompile Include="..\spirv_parser.cpp" />
<ClCompile Include="..\spirv_cross_parsed_ir.cpp" />
<ClCompile Include="..\spirv_cross_util.cpp" />
</ItemGroup>
<ItemGroup>
@ -144,6 +146,8 @@
<ClInclude Include="..\spirv_hlsl.hpp" />
<ClInclude Include="..\spirv_msl.hpp" />
<ClInclude Include="..\spirv_cfg.hpp" />
<ClCompile Include="..\spirv_parser.hpp" />
<ClCompile Include="..\spirv_cross_parsed_ir.hpp" />
<ClInclude Include="..\spirv_cross_util.hpp" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />


@ -36,6 +36,12 @@
<ClCompile Include="..\spirv_cfg.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\spirv_parser.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\spirv_cross_parsed_ir.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\spirv_hlsl.cpp">
<Filter>Source Files</Filter>
</ClCompile>
@ -71,6 +77,12 @@
<ClInclude Include="..\spirv_cfg.hpp">
<Filter>Header Files</Filter>
</ClInclude>
<ClCompile Include="..\spirv_parser.hpp">
<Filter>Header Files</Filter>
</ClCompile>
<ClCompile Include="..\spirv_cross_parsed_ir.hpp">
<Filter>Header Files</Filter>
</ClCompile>
<ClInclude Include="..\spirv_hlsl.hpp">
<Filter>Header Files</Filter>
</ClInclude>


@ -42,8 +42,8 @@ namespace spirv_cross
#ifndef _MSC_VER
[[noreturn]]
#endif
inline void
report_and_abort(const std::string &msg)
inline void
report_and_abort(const std::string &msg)
{
#ifdef NDEBUG
(void)msg;
@ -68,6 +68,17 @@ public:
#define SPIRV_CROSS_THROW(x) throw CompilerError(x)
#endif
//#define SPIRV_CROSS_COPY_CONSTRUCTOR_SANITIZE
// MSVC 2013 does not have noexcept. We need it on Variant's move operations so that
// std::vector uses the move constructor instead of the copy constructor.
// MSVC 2013 ignores that move constructors cannot throw in std::vector, so just don't define it.
#if defined(_MSC_VER) && _MSC_VER < 1900
#define SPIRV_CROSS_NOEXCEPT
#else
#define SPIRV_CROSS_NOEXCEPT noexcept
#endif
#if __cplusplus >= 201402l
#define SPIRV_CROSS_DEPRECATED(reason) [[deprecated(reason)]]
#elif defined(__GNUC__)
@ -282,21 +293,27 @@ inline std::string convert_to_string(double t)
struct Instruction
{
Instruction(const std::vector<uint32_t> &spirv, uint32_t &index);
uint16_t op;
uint16_t count;
uint32_t offset;
uint32_t length;
uint16_t op = 0;
uint16_t count = 0;
uint32_t offset = 0;
uint32_t length = 0;
};
// Helper for Variant interface.
struct IVariant
{
virtual ~IVariant() = default;
virtual std::unique_ptr<IVariant> clone() = 0;
uint32_t self = 0;
};
#define SPIRV_CROSS_DECLARE_CLONE(T) \
std::unique_ptr<IVariant> clone() override \
{ \
return std::unique_ptr<IVariant>(new T(*this)); \
}
enum Types
{
TypeNone,
@ -326,6 +343,8 @@ struct SPIRUndef : IVariant
{
}
uint32_t basetype;
SPIRV_CROSS_DECLARE_CLONE(SPIRUndef)
};
// This type is only used by backends which need to access the combined image and sampler IDs separately after
@ -345,6 +364,8 @@ struct SPIRCombinedImageSampler : IVariant
uint32_t combined_type;
uint32_t image;
uint32_t sampler;
SPIRV_CROSS_DECLARE_CLONE(SPIRCombinedImageSampler)
};
struct SPIRConstantOp : IVariant
@ -364,6 +385,8 @@ struct SPIRConstantOp : IVariant
spv::Op opcode;
std::vector<uint32_t> arguments;
uint32_t basetype;
SPIRV_CROSS_DECLARE_CLONE(SPIRConstantOp)
};
struct SPIRType : IVariant
@ -438,6 +461,8 @@ struct SPIRType : IVariant
// Used in backends to avoid emitting members with conflicting names.
std::unordered_set<std::string> member_name_cache;
SPIRV_CROSS_DECLARE_CLONE(SPIRType)
};
struct SPIRExtension : IVariant
@ -463,6 +488,7 @@ struct SPIRExtension : IVariant
}
Extension ext;
SPIRV_CROSS_DECLARE_CLONE(SPIRExtension)
};
// SPIREntryPoint is not a variant since its IDs are used to decorate OpFunction,
@ -533,6 +559,8 @@ struct SPIRExpression : IVariant
// A list of expressions which this expression depends on.
std::vector<uint32_t> expression_dependencies;
SPIRV_CROSS_DECLARE_CLONE(SPIRExpression)
};
struct SPIRFunctionPrototype : IVariant
@ -549,6 +577,8 @@ struct SPIRFunctionPrototype : IVariant
uint32_t return_type;
std::vector<uint32_t> parameter_types;
SPIRV_CROSS_DECLARE_CLONE(SPIRFunctionPrototype)
};
struct SPIRBlock : IVariant
@ -684,6 +714,8 @@ struct SPIRBlock : IVariant
// sub-group-like operations.
// Make sure that we only use these expressions in the original block.
std::vector<uint32_t> invalidate_expressions;
SPIRV_CROSS_DECLARE_CLONE(SPIRBlock)
};
struct SPIRFunction : IVariant
@ -769,6 +801,8 @@ struct SPIRFunction : IVariant
bool active = false;
bool flush_undeclared = true;
bool do_combined_parameters = true;
SPIRV_CROSS_DECLARE_CLONE(SPIRFunction)
};
struct SPIRAccessChain : IVariant
@ -803,6 +837,8 @@ struct SPIRAccessChain : IVariant
uint32_t matrix_stride = 0;
bool row_major_matrix = false;
bool immutable = false;
SPIRV_CROSS_DECLARE_CLONE(SPIRAccessChain)
};
struct SPIRVariable : IVariant
@ -856,6 +892,8 @@ struct SPIRVariable : IVariant
bool loop_variable_enable = false;
SPIRFunction::Parameter *parameter = nullptr;
SPIRV_CROSS_DECLARE_CLONE(SPIRVariable)
};
struct SPIRConstant : IVariant
@ -1111,6 +1149,8 @@ struct SPIRConstant : IVariant
// For composites which are constant arrays, etc.
std::vector<uint32_t> subconstants;
SPIRV_CROSS_DECLARE_CLONE(SPIRConstant)
};
class Variant
@ -1118,21 +1158,50 @@ class Variant
public:
// MSVC 2013 workaround, we shouldn't need these constructors.
Variant() = default;
Variant(Variant &&other)
// Marking custom move constructor as noexcept is important.
Variant(Variant &&other) SPIRV_CROSS_NOEXCEPT
{
*this = std::move(other);
}
Variant &operator=(Variant &&other)
Variant(const Variant &variant)
{
*this = variant;
}
// Marking the custom move assignment operator as noexcept is important.
Variant &operator=(Variant &&other) SPIRV_CROSS_NOEXCEPT
{
if (this != &other)
{
holder = move(other.holder);
holder = std::move(other.holder);
type = other.type;
allow_type_rewrite = other.allow_type_rewrite;
other.type = TypeNone;
}
return *this;
}
// This copy/clone should only be called in the Compiler constructor.
// If this is called inside ::compile(), we invalidate any references we took higher in the stack.
// This should never happen.
Variant &operator=(const Variant &other)
{
#ifdef SPIRV_CROSS_COPY_CONSTRUCTOR_SANITIZE
abort();
#endif
if (this != &other)
{
holder.reset();
if (other.holder)
holder = other.holder->clone();
type = other.type;
allow_type_rewrite = other.allow_type_rewrite;
}
return *this;
}
void set(std::unique_ptr<IVariant> val, uint32_t new_type)
{
holder = std::move(val);
@ -1166,14 +1235,17 @@ public:
{
return type;
}
uint32_t get_id() const
{
return holder ? holder->self : 0;
}
bool empty() const
{
return !holder;
}
void reset()
{
holder.reset();
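
A minimal sketch of what the noexcept move and the clone()-based copy above buy (fragment only, assuming spirv_common.hpp and <vector> are included):

std::vector<Variant> pool(16);
// Reallocation moves the Variants because the move operations are SPIRV_CROSS_NOEXCEPT;
// without noexcept, std::vector would have to fall back to the deep, clone()-based copy.
pool.resize(1024);

// Copying a Variant duplicates the held IVariant through its virtual clone(),
// which is what makes a whole ParsedIR copyable.
Variant copy = pool[0];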


@ -27,8 +27,8 @@ void CompilerCPP::emit_buffer_block(const SPIRVariable &var)
auto &type = get<SPIRType>(var.basetype);
auto instance_name = to_name(var.self);
uint32_t descriptor_set = meta[var.self].decoration.set;
uint32_t binding = meta[var.self].decoration.binding;
uint32_t descriptor_set = ir.meta[var.self].decoration.set;
uint32_t binding = ir.meta[var.self].decoration.binding;
emit_block_struct(type);
auto buffer_name = to_name(type.self);
@ -49,10 +49,10 @@ void CompilerCPP::emit_interface_block(const SPIRVariable &var)
const char *qual = var.storage == StorageClassInput ? "StageInput" : "StageOutput";
const char *lowerqual = var.storage == StorageClassInput ? "stage_input" : "stage_output";
auto instance_name = to_name(var.self);
uint32_t location = meta[var.self].decoration.location;
uint32_t location = ir.meta[var.self].decoration.location;
string buffer_name;
auto flags = meta[type.self].decoration.decoration_flags;
auto flags = ir.meta[type.self].decoration.decoration_flags;
if (flags.get(DecorationBlock))
{
emit_block_struct(type);
@ -83,9 +83,9 @@ void CompilerCPP::emit_uniform(const SPIRVariable &var)
auto &type = get<SPIRType>(var.basetype);
auto instance_name = to_name(var.self);
uint32_t descriptor_set = meta[var.self].decoration.set;
uint32_t binding = meta[var.self].decoration.binding;
uint32_t location = meta[var.self].decoration.location;
uint32_t descriptor_set = ir.meta[var.self].decoration.set;
uint32_t binding = ir.meta[var.self].decoration.binding;
uint32_t location = ir.meta[var.self].decoration.location;
string type_name = type_to_glsl(type);
remap_variable_type_name(type, instance_name, type_name);
@ -114,7 +114,7 @@ void CompilerCPP::emit_push_constant_block(const SPIRVariable &var)
add_resource_name(var.self);
auto &type = get<SPIRType>(var.basetype);
auto &flags = meta[var.self].decoration.decoration_flags;
auto &flags = ir.meta[var.self].decoration.decoration_flags;
if (flags.get(DecorationBinding) || flags.get(DecorationDescriptorSet))
SPIRV_CROSS_THROW("Push constant blocks cannot be compiled to GLSL with Binding or Set syntax. "
"Remap to location with reflection API first or disable these decorations.");
@ -145,14 +145,14 @@ void CompilerCPP::emit_resources()
{
// Output all basic struct types which are not Block or BufferBlock as these are declared inplace
// when such variables are instantiated.
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeType)
{
auto &type = id.get<SPIRType>();
if (type.basetype == SPIRType::Struct && type.array.empty() && !type.pointer &&
(!meta[type.self].decoration.decoration_flags.get(DecorationBlock) &&
!meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock)))
(!ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) &&
!ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock)))
{
emit_struct(type);
}
@ -163,7 +163,7 @@ void CompilerCPP::emit_resources()
begin_scope();
// Output UBOs and SSBOs
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
@ -172,8 +172,8 @@ void CompilerCPP::emit_resources()
if (var.storage != StorageClassFunction && type.pointer && type.storage == StorageClassUniform &&
!is_hidden_variable(var) &&
(meta[type.self].decoration.decoration_flags.get(DecorationBlock) ||
meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock)))
(ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) ||
ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock)))
{
emit_buffer_block(var);
}
@ -181,7 +181,7 @@ void CompilerCPP::emit_resources()
}
// Output push constant blocks
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
@ -196,7 +196,7 @@ void CompilerCPP::emit_resources()
}
// Output in/out interfaces.
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
@ -213,7 +213,7 @@ void CompilerCPP::emit_resources()
}
// Output Uniform Constants (values, samplers, images, etc).
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
@ -318,7 +318,7 @@ string CompilerCPP::compile()
emit_header();
emit_resources();
emit_function(get<SPIRFunction>(entry_point), Bitset());
emit_function(get<SPIRFunction>(ir.default_entry_point), Bitset());
pass_count++;
} while (force_recompile);
@ -376,7 +376,7 @@ void CompilerCPP::emit_c_linkage()
void CompilerCPP::emit_function_prototype(SPIRFunction &func, const Bitset &)
{
if (func.self != entry_point)
if (func.self != ir.default_entry_point)
add_function_overload(func);
local_variable_names = resource_names;
@ -387,7 +387,7 @@ void CompilerCPP::emit_function_prototype(SPIRFunction &func, const Bitset &)
decl += type_to_glsl(type);
decl += " ";
if (func.self == entry_point)
if (func.self == ir.default_entry_point)
{
decl += "main";
processing_entry_point = true;


@ -26,13 +26,23 @@ namespace spirv_cross
class CompilerCPP : public CompilerGLSL
{
public:
CompilerCPP(std::vector<uint32_t> spirv_)
explicit CompilerCPP(std::vector<uint32_t> spirv_)
: CompilerGLSL(move(spirv_))
{
}
CompilerCPP(const uint32_t *ir, size_t word_count)
: CompilerGLSL(ir, word_count)
CompilerCPP(const uint32_t *ir_, size_t word_count)
: CompilerGLSL(ir_, word_count)
{
}
explicit CompilerCPP(const ParsedIR &ir_)
: CompilerGLSL(ir_)
{
}
explicit CompilerCPP(ParsedIR &&ir_)
: CompilerGLSL(std::move(ir_))
{
}

File diff suppressed because it is too large


@ -19,7 +19,7 @@
#include "spirv.hpp"
#include "spirv_cfg.hpp"
#include "spirv_common.hpp"
#include "spirv_cross_parsed_ir.hpp"
namespace spirv_cross
{
@ -121,9 +121,16 @@ public:
friend class DominatorBuilder;
// The constructor takes a buffer of SPIR-V words and parses it.
Compiler(std::vector<uint32_t> ir);
// It will create its own parser, parse the SPIR-V and move the parsed IR
// as if you had called the constructors taking ParsedIR directly.
explicit Compiler(std::vector<uint32_t> ir);
Compiler(const uint32_t *ir, size_t word_count);
// This is more modular. We can also consume a ParsedIR structure directly, either by move or by copy.
// With copy, we can reuse the same parsed IR for multiple Compiler instances.
explicit Compiler(const ParsedIR &ir);
explicit Compiler(ParsedIR &&ir);
virtual ~Compiler() = default;
// After parsing, API users can modify the SPIR-V via reflection and call this
@ -160,7 +167,7 @@ public:
uint32_t get_decoration(uint32_t id, spv::Decoration decoration) const;
const std::string &get_decoration_string(uint32_t id, spv::Decoration decoration) const;
// Removes the decoration for a an ID.
// Removes the decoration for an ID.
void unset_decoration(uint32_t id, spv::Decoration decoration);
// Gets the SPIR-V type associated with ID.
@ -444,7 +451,7 @@ public:
uint32_t get_current_id_bound() const
{
return uint32_t(ids.size());
return uint32_t(ir.ids.size());
}
// API for querying buffer objects.
@ -522,20 +529,19 @@ protected:
if (!instr.length)
return nullptr;
if (instr.offset + instr.length > spirv.size())
if (instr.offset + instr.length > ir.spirv.size())
SPIRV_CROSS_THROW("Compiler::stream() out of range.");
return &spirv[instr.offset];
return &ir.spirv[instr.offset];
}
std::vector<uint32_t> spirv;
std::vector<Instruction> inst;
std::vector<Variant> ids;
std::vector<Meta> meta;
ParsedIR ir;
// Marks variables which have global scope and variables which can alias with other variables
// (SSBO, image load store, etc)
std::vector<uint32_t> global_variables;
std::vector<uint32_t> aliased_variables;
SPIRFunction *current_function = nullptr;
SPIRBlock *current_block = nullptr;
std::vector<uint32_t> global_variables;
std::vector<uint32_t> aliased_variables;
std::unordered_set<uint32_t> active_interface_variables;
bool check_active_interface_variables = false;
@ -544,7 +550,7 @@ protected:
template <typename T, typename... P>
T &set(uint32_t id, P &&... args)
{
auto &var = variant_set<T>(ids.at(id), std::forward<P>(args)...);
auto &var = variant_set<T>(ir.ids.at(id), std::forward<P>(args)...);
var.self = id;
return var;
}
@ -552,13 +558,13 @@ protected:
template <typename T>
T &get(uint32_t id)
{
return variant_get<T>(ids.at(id));
return variant_get<T>(ir.ids.at(id));
}
template <typename T>
T *maybe_get(uint32_t id)
{
if (ids.at(id).get_type() == T::type)
if (ir.ids.at(id).get_type() == T::type)
return &get<T>(id);
else
return nullptr;
@ -567,42 +573,21 @@ protected:
template <typename T>
const T &get(uint32_t id) const
{
return variant_get<T>(ids.at(id));
return variant_get<T>(ir.ids.at(id));
}
template <typename T>
const T *maybe_get(uint32_t id) const
{
if (ids.at(id).get_type() == T::type)
if (ir.ids.at(id).get_type() == T::type)
return &get<T>(id);
else
return nullptr;
}
uint32_t entry_point = 0;
// Normally, we'd stick SPIREntryPoint in ids array, but it conflicts with SPIRFunction.
// Entry points can therefore be seen as some sort of meta structure.
std::unordered_map<uint32_t, SPIREntryPoint> entry_points;
const SPIREntryPoint &get_entry_point() const;
SPIREntryPoint &get_entry_point();
struct Source
{
uint32_t version = 0;
bool es = false;
bool known = false;
bool hlsl = false;
Source() = default;
} source;
std::unordered_set<uint32_t> loop_blocks;
std::unordered_set<uint32_t> continue_blocks;
std::unordered_set<uint32_t> loop_merge_targets;
std::unordered_set<uint32_t> selection_merge_targets;
std::unordered_set<uint32_t> multiselect_merge_targets;
std::unordered_map<uint32_t, uint32_t> continue_block_to_loop_header;
virtual std::string to_name(uint32_t id, bool allow_alias = true) const;
bool is_builtin_variable(const SPIRVariable &var) const;
bool is_builtin_type(const SPIRType &type) const;
@ -618,14 +603,13 @@ protected:
bool expression_is_lvalue(uint32_t id) const;
bool variable_storage_is_aliased(const SPIRVariable &var);
SPIRVariable *maybe_get_backing_variable(uint32_t chain);
void mark_used_as_array_length(uint32_t id);
void register_read(uint32_t expr, uint32_t chain, bool forwarded);
void register_write(uint32_t chain);
inline bool is_continue(uint32_t next) const
{
return continue_blocks.find(next) != end(continue_blocks);
return (ir.block_meta[next] & ParsedIR::BLOCK_META_CONTINUE_BIT) != 0;
}
inline bool is_single_block_loop(uint32_t next) const
@ -636,19 +620,19 @@ protected:
inline bool is_break(uint32_t next) const
{
return loop_merge_targets.find(next) != end(loop_merge_targets) ||
multiselect_merge_targets.find(next) != end(multiselect_merge_targets);
return (ir.block_meta[next] &
(ParsedIR::BLOCK_META_LOOP_MERGE_BIT | ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT)) != 0;
}
inline bool is_loop_break(uint32_t next) const
{
return loop_merge_targets.find(next) != end(loop_merge_targets);
return (ir.block_meta[next] & ParsedIR::BLOCK_META_LOOP_MERGE_BIT) != 0;
}
inline bool is_conditional(uint32_t next) const
{
return selection_merge_targets.find(next) != end(selection_merge_targets) &&
multiselect_merge_targets.find(next) == end(multiselect_merge_targets);
return (ir.block_meta[next] &
(ParsedIR::BLOCK_META_SELECTION_MERGE_BIT | ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT)) != 0;
}
// Dependency tracking for temporaries read from variables.
@ -675,8 +659,6 @@ protected:
bool block_is_loop_candidate(const SPIRBlock &block, SPIRBlock::Method method) const;
uint32_t increase_bound_by(uint32_t incr_amount);
bool types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const;
void inherit_expression_dependencies(uint32_t dst, uint32_t source);
@ -692,8 +674,9 @@ protected:
variable_remap_callback(type, var_name, type_name);
}
void parse();
void parse(const Instruction &i);
void set_ir(const ParsedIR &parsed);
void set_ir(ParsedIR &&parsed);
void parse_fixup();
// Used internally to implement various traversals for queries.
struct OpcodeHandler
@ -813,7 +796,6 @@ protected:
VariableTypeRemapCallback variable_remap_callback;
Bitset get_buffer_block_flags(const SPIRVariable &var) const;
bool get_common_basic_type(const SPIRType &type, SPIRType::BaseType &base_type);
std::unordered_set<uint32_t> forced_temporaries;
@ -934,8 +916,6 @@ protected:
void make_constant_null(uint32_t id, uint32_t type);
std::vector<spv::Capability> declared_capabilities;
std::vector<std::string> declared_extensions;
std::unordered_map<uint32_t, std::string> declared_block_names;
bool instruction_to_result_type(uint32_t &result_type, uint32_t &result_id, spv::Op op, const uint32_t *args,
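
The control-flow helpers above now test bits in ParsedIR::block_meta instead of probing several unordered_sets. A hedged sketch of the pattern (a free function for illustration only, not part of the API):

// True if the Parser tagged this block as a continue block.
static bool block_is_continue(const spirv_cross::ParsedIR &ir, uint32_t block_id)
{
    return (ir.block_meta[block_id] & spirv_cross::ParsedIR::BLOCK_META_CONTINUE_BIT) != 0;
}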

spirv_cross_parsed_ir.cpp (new file, 557 lines)

@ -0,0 +1,557 @@
/*
* Copyright 2018 Arm Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "spirv_cross_parsed_ir.hpp"
#include <assert.h>
using namespace std;
using namespace spv;
namespace spirv_cross
{
void ParsedIR::set_id_bounds(uint32_t bounds)
{
ids.resize(bounds);
meta.resize(bounds);
block_meta.resize(bounds);
}
static string ensure_valid_identifier(const string &name, bool member)
{
// Functions in glslangValidator are mangled into the form "name(<mangled signature>".
// Normally, we would never see '(' in any legal identifiers, so just strip them out.
auto str = name.substr(0, name.find('('));
for (uint32_t i = 0; i < str.size(); i++)
{
auto &c = str[i];
if (member)
{
// _m<num> variables are reserved by the internal implementation,
// otherwise, make sure the name is a valid identifier.
if (i == 0)
c = isalpha(c) ? c : '_';
else if (i == 2 && str[0] == '_' && str[1] == 'm')
c = isalpha(c) ? c : '_';
else
c = isalnum(c) ? c : '_';
}
else
{
// _<num> variables are reserved by the internal implementation,
// otherwise, make sure the name is a valid identifier.
if (i == 0 || (str[0] == '_' && i == 1))
c = isalpha(c) ? c : '_';
else
c = isalnum(c) ? c : '_';
}
}
return str;
}
const string &ParsedIR::get_name(uint32_t id) const
{
return meta[id].decoration.alias;
}
const string &ParsedIR::get_member_name(uint32_t id, uint32_t index) const
{
auto &m = meta[id];
if (index >= m.members.size())
{
static string empty;
return empty;
}
return m.members[index].alias;
}
void ParsedIR::set_name(uint32_t id, const string &name)
{
auto &str = meta[id].decoration.alias;
str.clear();
if (name.empty())
return;
// glslang uses identifiers to pass along meaningful information
// about HLSL reflection.
// FIXME: This should be deprecated eventually.
auto &m = meta[id];
if (source.hlsl && name.size() >= 6 && name.find("@count") == name.size() - 6)
{
m.hlsl_magic_counter_buffer_candidate = true;
m.hlsl_magic_counter_buffer_name = name.substr(0, name.find("@count"));
}
else
{
m.hlsl_magic_counter_buffer_candidate = false;
m.hlsl_magic_counter_buffer_name.clear();
}
// Reserved for temporaries.
if (name[0] == '_' && name.size() >= 2 && isdigit(name[1]))
return;
str = ensure_valid_identifier(name, false);
}
void ParsedIR::set_member_name(uint32_t id, uint32_t index, const string &name)
{
meta[id].members.resize(max(meta[id].members.size(), size_t(index) + 1));
auto &str = meta[id].members[index].alias;
str.clear();
if (name.empty())
return;
// Reserved for unnamed members.
if (name[0] == '_' && name.size() >= 3 && name[1] == 'm' && isdigit(name[2]))
return;
str = ensure_valid_identifier(name, true);
}
void ParsedIR::set_decoration_string(uint32_t id, Decoration decoration, const string &argument)
{
auto &dec = meta[id].decoration;
dec.decoration_flags.set(decoration);
switch (decoration)
{
case DecorationHlslSemanticGOOGLE:
dec.hlsl_semantic = argument;
break;
default:
break;
}
}
void ParsedIR::set_decoration(uint32_t id, Decoration decoration, uint32_t argument)
{
auto &dec = meta[id].decoration;
dec.decoration_flags.set(decoration);
switch (decoration)
{
case DecorationBuiltIn:
dec.builtin = true;
dec.builtin_type = static_cast<BuiltIn>(argument);
break;
case DecorationLocation:
dec.location = argument;
break;
case DecorationComponent:
dec.component = argument;
break;
case DecorationOffset:
dec.offset = argument;
break;
case DecorationArrayStride:
dec.array_stride = argument;
break;
case DecorationMatrixStride:
dec.matrix_stride = argument;
break;
case DecorationBinding:
dec.binding = argument;
break;
case DecorationDescriptorSet:
dec.set = argument;
break;
case DecorationInputAttachmentIndex:
dec.input_attachment = argument;
break;
case DecorationSpecId:
dec.spec_id = argument;
break;
case DecorationIndex:
dec.index = argument;
break;
case DecorationHlslCounterBufferGOOGLE:
meta[id].hlsl_magic_counter_buffer = argument;
meta[id].hlsl_is_magic_counter_buffer = true;
break;
default:
break;
}
}
void ParsedIR::set_member_decoration(uint32_t id, uint32_t index, Decoration decoration, uint32_t argument)
{
meta[id].members.resize(max(meta[id].members.size(), size_t(index) + 1));
auto &dec = meta[id].members[index];
dec.decoration_flags.set(decoration);
switch (decoration)
{
case DecorationBuiltIn:
dec.builtin = true;
dec.builtin_type = static_cast<BuiltIn>(argument);
break;
case DecorationLocation:
dec.location = argument;
break;
case DecorationComponent:
dec.component = argument;
break;
case DecorationBinding:
dec.binding = argument;
break;
case DecorationOffset:
dec.offset = argument;
break;
case DecorationSpecId:
dec.spec_id = argument;
break;
case DecorationMatrixStride:
dec.matrix_stride = argument;
break;
case DecorationIndex:
dec.index = argument;
break;
default:
break;
}
}
// Recursively marks any constants referenced by the specified constant instruction as being used
// as an array length. The id must be a constant instruction (SPIRConstant or SPIRConstantOp).
void ParsedIR::mark_used_as_array_length(uint32_t id)
{
switch (ids[id].get_type())
{
case TypeConstant:
get<SPIRConstant>(id).is_used_as_array_length = true;
break;
case TypeConstantOp:
{
auto &cop = get<SPIRConstantOp>(id);
for (uint32_t arg_id : cop.arguments)
mark_used_as_array_length(arg_id);
break;
}
case TypeUndef:
break;
default:
assert(0);
}
}
Bitset ParsedIR::get_buffer_block_flags(const SPIRVariable &var) const
{
auto &type = get<SPIRType>(var.basetype);
assert(type.basetype == SPIRType::Struct);
// Some flags like non-writable, non-readable are actually found
// as member decorations. If all members have a decoration set, propagate
// the decoration up as a regular variable decoration.
Bitset base_flags = meta[var.self].decoration.decoration_flags;
if (type.member_types.empty())
return base_flags;
Bitset all_members_flags = get_member_decoration_bitset(type.self, 0);
for (uint32_t i = 1; i < uint32_t(type.member_types.size()); i++)
all_members_flags.merge_and(get_member_decoration_bitset(type.self, i));
base_flags.merge_or(all_members_flags);
return base_flags;
}
const Bitset &ParsedIR::get_member_decoration_bitset(uint32_t id, uint32_t index) const
{
auto &m = meta[id];
if (index >= m.members.size())
{
static const Bitset cleared = {};
return cleared;
}
return m.members[index].decoration_flags;
}
bool ParsedIR::has_decoration(uint32_t id, Decoration decoration) const
{
return get_decoration_bitset(id).get(decoration);
}
uint32_t ParsedIR::get_decoration(uint32_t id, Decoration decoration) const
{
auto &dec = meta[id].decoration;
if (!dec.decoration_flags.get(decoration))
return 0;
switch (decoration)
{
case DecorationBuiltIn:
return dec.builtin_type;
case DecorationLocation:
return dec.location;
case DecorationComponent:
return dec.component;
case DecorationOffset:
return dec.offset;
case DecorationBinding:
return dec.binding;
case DecorationDescriptorSet:
return dec.set;
case DecorationInputAttachmentIndex:
return dec.input_attachment;
case DecorationSpecId:
return dec.spec_id;
case DecorationArrayStride:
return dec.array_stride;
case DecorationMatrixStride:
return dec.matrix_stride;
case DecorationIndex:
return dec.index;
default:
return 1;
}
}
const string &ParsedIR::get_decoration_string(uint32_t id, Decoration decoration) const
{
auto &dec = meta[id].decoration;
static const string empty;
if (!dec.decoration_flags.get(decoration))
return empty;
switch (decoration)
{
case DecorationHlslSemanticGOOGLE:
return dec.hlsl_semantic;
default:
return empty;
}
}
void ParsedIR::unset_decoration(uint32_t id, Decoration decoration)
{
auto &dec = meta[id].decoration;
dec.decoration_flags.clear(decoration);
switch (decoration)
{
case DecorationBuiltIn:
dec.builtin = false;
break;
case DecorationLocation:
dec.location = 0;
break;
case DecorationComponent:
dec.component = 0;
break;
case DecorationOffset:
dec.offset = 0;
break;
case DecorationBinding:
dec.binding = 0;
break;
case DecorationDescriptorSet:
dec.set = 0;
break;
case DecorationInputAttachmentIndex:
dec.input_attachment = 0;
break;
case DecorationSpecId:
dec.spec_id = 0;
break;
case DecorationHlslSemanticGOOGLE:
dec.hlsl_semantic.clear();
break;
case DecorationHlslCounterBufferGOOGLE:
{
auto &counter = meta[id].hlsl_magic_counter_buffer;
if (counter)
{
meta[counter].hlsl_is_magic_counter_buffer = false;
counter = 0;
}
break;
}
default:
break;
}
}
bool ParsedIR::has_member_decoration(uint32_t id, uint32_t index, Decoration decoration) const
{
return get_member_decoration_bitset(id, index).get(decoration);
}
uint32_t ParsedIR::get_member_decoration(uint32_t id, uint32_t index, Decoration decoration) const
{
auto &m = meta[id];
if (index >= m.members.size())
return 0;
auto &dec = m.members[index];
if (!dec.decoration_flags.get(decoration))
return 0;
switch (decoration)
{
case DecorationBuiltIn:
return dec.builtin_type;
case DecorationLocation:
return dec.location;
case DecorationComponent:
return dec.component;
case DecorationBinding:
return dec.binding;
case DecorationOffset:
return dec.offset;
case DecorationSpecId:
return dec.spec_id;
case DecorationIndex:
return dec.index;
default:
return 1;
}
}
const Bitset &ParsedIR::get_decoration_bitset(uint32_t id) const
{
auto &dec = meta[id].decoration;
return dec.decoration_flags;
}
void ParsedIR::set_member_decoration_string(uint32_t id, uint32_t index, Decoration decoration, const string &argument)
{
meta[id].members.resize(max(meta[id].members.size(), size_t(index) + 1));
auto &dec = meta[id].members[index];
dec.decoration_flags.set(decoration);
switch (decoration)
{
case DecorationHlslSemanticGOOGLE:
dec.hlsl_semantic = argument;
break;
default:
break;
}
}
const string &ParsedIR::get_member_decoration_string(uint32_t id, uint32_t index, Decoration decoration) const
{
static const string empty;
auto &m = meta[id];
if (!has_member_decoration(id, index, decoration))
return empty;
auto &dec = m.members[index];
switch (decoration)
{
case DecorationHlslSemanticGOOGLE:
return dec.hlsl_semantic;
default:
return empty;
}
}
void ParsedIR::unset_member_decoration(uint32_t id, uint32_t index, Decoration decoration)
{
auto &m = meta[id];
if (index >= m.members.size())
return;
auto &dec = m.members[index];
dec.decoration_flags.clear(decoration);
switch (decoration)
{
case DecorationBuiltIn:
dec.builtin = false;
break;
case DecorationLocation:
dec.location = 0;
break;
case DecorationComponent:
dec.component = 0;
break;
case DecorationOffset:
dec.offset = 0;
break;
case DecorationSpecId:
dec.spec_id = 0;
break;
case DecorationHlslSemanticGOOGLE:
dec.hlsl_semantic.clear();
break;
default:
break;
}
}
uint32_t ParsedIR::increase_bound_by(uint32_t incr_amount)
{
auto curr_bound = ids.size();
auto new_bound = curr_bound + incr_amount;
ids.resize(new_bound);
meta.resize(new_bound);
block_meta.resize(new_bound);
return uint32_t(curr_bound);
}
} // namespace spirv_cross
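
The decoration helpers above allow simple "raw" reflection straight off a ParsedIR, without constructing a Compiler. A small sketch (the variable ID is hypothetical, and spirv_words is an assumed std::vector<uint32_t>):

#include "spirv_parser.hpp"
#include <cstdio>

spirv_cross::Parser parser(std::move(spirv_words));
parser.parse();
auto &ir = parser.get_parsed_ir();

uint32_t var_id = 42; // hypothetical ID of some OpVariable in the module
if (ir.has_decoration(var_id, spv::DecorationDescriptorSet))
{
    uint32_t set = ir.get_decoration(var_id, spv::DecorationDescriptorSet);
    uint32_t binding = ir.get_decoration(var_id, spv::DecorationBinding);
    printf("%s -> set %u, binding %u\n", ir.get_name(var_id).c_str(), set, binding);
}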

spirv_cross_parsed_ir.hpp (new file, 129 lines)

@ -0,0 +1,129 @@
/*
* Copyright 2018 Arm Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SPIRV_CROSS_PARSED_IR_HPP
#define SPIRV_CROSS_PARSED_IR_HPP
#include "spirv_common.hpp"
#include <stdint.h>
#include <unordered_map>
#include <vector>
namespace spirv_cross
{
// This data structure holds all information needed to perform cross-compilation and reflection.
// It is the output of the Parser, but any implementation could create this structure.
// It is intentionally very "open" and struct-like with some helper functions to deal with decorations.
// Parser is the reference implementation of how this data structure should be filled in.
class ParsedIR
{
public:
// Resizes ids, meta and block_meta.
void set_id_bounds(uint32_t bounds);
// The raw SPIR-V, instructions and opcodes refer to this by offset + count.
std::vector<uint32_t> spirv;
// Holds various data structures which inherit from IVariant.
std::vector<Variant> ids;
// Various metadata for IDs: decorations, names, etc.
std::vector<Meta> meta;
// Declared capabilities and extensions in the SPIR-V module.
// Not really used except for reflection at the moment.
std::vector<spv::Capability> declared_capabilities;
std::vector<std::string> declared_extensions;
// Metadata about blocks. The cross-compiler needs to query whether a block is tagged with any of these.
// It is a bitset as there can be more than one tag per block.
enum BlockMetaFlagBits
{
BLOCK_META_LOOP_HEADER_BIT = 1 << 0,
BLOCK_META_CONTINUE_BIT = 1 << 1,
BLOCK_META_LOOP_MERGE_BIT = 1 << 2,
BLOCK_META_SELECTION_MERGE_BIT = 1 << 3,
BLOCK_META_MULTISELECT_MERGE_BIT = 1 << 4
};
using BlockMetaFlags = uint8_t;
std::vector<BlockMetaFlags> block_meta;
std::unordered_map<uint32_t, uint32_t> continue_block_to_loop_header;
// Normally, we'd stick SPIREntryPoint in ids array, but it conflicts with SPIRFunction.
// Entry points can therefore be seen as some sort of meta structure.
std::unordered_map<uint32_t, SPIREntryPoint> entry_points;
uint32_t default_entry_point = 0;
struct Source
{
uint32_t version = 0;
bool es = false;
bool known = false;
bool hlsl = false;
Source() = default;
};
Source source;
// Decoration handling methods.
// Can be useful for simple "raw" reflection.
// However, most of these methods live here because the Parser needs them,
// and we might as well keep the whole suite of decoration/name handling in one place.
void set_name(uint32_t id, const std::string &name);
const std::string &get_name(uint32_t id) const;
void set_decoration(uint32_t id, spv::Decoration decoration, uint32_t argument = 0);
void set_decoration_string(uint32_t id, spv::Decoration decoration, const std::string &argument);
bool has_decoration(uint32_t id, spv::Decoration decoration) const;
uint32_t get_decoration(uint32_t id, spv::Decoration decoration) const;
const std::string &get_decoration_string(uint32_t id, spv::Decoration decoration) const;
const Bitset &get_decoration_bitset(uint32_t id) const;
void unset_decoration(uint32_t id, spv::Decoration decoration);
// Decoration handling methods (for members of a struct).
void set_member_name(uint32_t id, uint32_t index, const std::string &name);
const std::string &get_member_name(uint32_t id, uint32_t index) const;
void set_member_decoration(uint32_t id, uint32_t index, spv::Decoration decoration, uint32_t argument = 0);
void set_member_decoration_string(uint32_t id, uint32_t index, spv::Decoration decoration,
const std::string &argument);
uint32_t get_member_decoration(uint32_t id, uint32_t index, spv::Decoration decoration) const;
const std::string &get_member_decoration_string(uint32_t id, uint32_t index, spv::Decoration decoration) const;
bool has_member_decoration(uint32_t id, uint32_t index, spv::Decoration decoration) const;
const Bitset &get_member_decoration_bitset(uint32_t id, uint32_t index) const;
void unset_member_decoration(uint32_t id, uint32_t index, spv::Decoration decoration);
void mark_used_as_array_length(uint32_t id);
uint32_t increase_bound_by(uint32_t count);
Bitset get_buffer_block_flags(const SPIRVariable &var) const;
private:
template <typename T>
T &get(uint32_t id)
{
return variant_get<T>(ids[id]);
}
template <typename T>
const T &get(uint32_t id) const
{
return variant_get<T>(ids[id]);
}
};
} // namespace spirv_cross
#endif
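
Because the compilers also accept a const ParsedIR &, one parsed module can feed several backends without re-parsing. A sketch assuming the GLSL and MSL backends are both compiled in (spirv_words as above):

#include "spirv_glsl.hpp"
#include "spirv_msl.hpp"
#include "spirv_parser.hpp"

spirv_cross::Parser parser(std::move(spirv_words));
parser.parse();
const auto &ir = parser.get_parsed_ir();

spirv_cross::CompilerGLSL glsl(ir); // copies the ParsedIR
spirv_cross::CompilerMSL msl(ir);   // copies it again; the original stays intact
std::string glsl_source = glsl.compile();
std::string msl_source = msl.compile();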


@ -290,7 +290,7 @@ void CompilerGLSL::reset()
block_ssbo_names.clear();
function_overloads.clear();
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
@ -343,7 +343,7 @@ void CompilerGLSL::remap_pls_variables()
void CompilerGLSL::find_static_extensions()
{
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeType)
{
@ -443,7 +443,7 @@ string CompilerGLSL::compile()
emit_header();
emit_resources();
emit_function(get<SPIRFunction>(entry_point), Bitset());
emit_function(get<SPIRFunction>(ir.default_entry_point), Bitset());
pass_count++;
} while (force_recompile);
@ -738,12 +738,12 @@ string CompilerGLSL::layout_for_member(const SPIRType &type, uint32_t index)
if (is_legacy())
return "";
bool is_block = meta[type.self].decoration.decoration_flags.get(DecorationBlock) ||
meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
bool is_block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) ||
ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
if (!is_block)
return "";
auto &memb = meta[type.self].members;
auto &memb = ir.meta[type.self].members;
if (index >= memb.size())
return "";
auto &dec = memb[index];
@ -938,7 +938,7 @@ uint32_t CompilerGLSL::type_to_packed_alignment(const SPIRType &type, const Bits
uint32_t alignment = 0;
for (uint32_t i = 0; i < type.member_types.size(); i++)
{
auto member_flags = meta[type.self].members.at(i).decoration_flags;
auto member_flags = ir.meta[type.self].members.at(i).decoration_flags;
alignment =
max(alignment, type_to_packed_alignment(get<SPIRType>(type.member_types[i]), member_flags, packing));
}
@ -1043,7 +1043,7 @@ uint32_t CompilerGLSL::type_to_packed_size(const SPIRType &type, const Bitset &f
for (uint32_t i = 0; i < type.member_types.size(); i++)
{
auto member_flags = meta[type.self].members.at(i).decoration_flags;
auto member_flags = ir.meta[type.self].members.at(i).decoration_flags;
auto &member_type = get<SPIRType>(type.member_types[i]);
uint32_t packed_alignment = type_to_packed_alignment(member_type, member_flags, packing);
@ -1113,7 +1113,7 @@ bool CompilerGLSL::buffer_is_packing_standard(const SPIRType &type, BufferPackin
for (uint32_t i = 0; i < type.member_types.size(); i++)
{
auto &memb_type = get<SPIRType>(type.member_types[i]);
auto member_flags = meta[type.self].members.at(i).decoration_flags;
auto member_flags = ir.meta[type.self].members.at(i).decoration_flags;
// Verify alignment rules.
uint32_t packed_alignment = type_to_packed_alignment(memb_type, member_flags, packing);
@ -1221,10 +1221,10 @@ string CompilerGLSL::layout_for_variable(const SPIRVariable &var)
vector<string> attr;
auto &dec = meta[var.self].decoration;
auto &dec = ir.meta[var.self].decoration;
auto &type = get<SPIRType>(var.basetype);
auto flags = dec.decoration_flags;
auto typeflags = meta[type.self].decoration.decoration_flags;
auto typeflags = ir.meta[type.self].decoration.decoration_flags;
if (options.vulkan_semantics && var.storage == StorageClassPushConstant)
attr.push_back("push_constant");
@ -1244,7 +1244,7 @@ string CompilerGLSL::layout_for_variable(const SPIRVariable &var)
if (flags.get(DecorationLocation) && can_use_io_location(var.storage, is_block))
{
Bitset combined_decoration;
for (uint32_t i = 0; i < meta[type.self].members.size(); i++)
for (uint32_t i = 0; i < ir.meta[type.self].members.size(); i++)
combined_decoration.merge_or(combined_decoration_for_member(type, i));
// If our members have location decorations, we don't need to
@ -1411,7 +1411,7 @@ void CompilerGLSL::emit_push_constant_block_glsl(const SPIRVariable &var)
// OpenGL has no concept of push constant blocks, implement it as a uniform struct.
auto &type = get<SPIRType>(var.basetype);
auto &flags = meta[var.self].decoration.decoration_flags;
auto &flags = ir.meta[var.self].decoration.decoration_flags;
flags.clear(DecorationBinding);
flags.clear(DecorationDescriptorSet);
@ -1423,7 +1423,7 @@ void CompilerGLSL::emit_push_constant_block_glsl(const SPIRVariable &var)
// We're emitting the push constant block as a regular struct, so disable the block qualifier temporarily.
// Otherwise, we will end up emitting layout() qualifiers on naked structs which is not allowed.
auto &block_flags = meta[type.self].decoration.decoration_flags;
auto &block_flags = ir.meta[type.self].decoration.decoration_flags;
bool block_flag = block_flags.get(DecorationBlock);
block_flags.clear(DecorationBlock);
@ -1450,13 +1450,13 @@ void CompilerGLSL::emit_buffer_block_legacy(const SPIRVariable &var)
{
auto &type = get<SPIRType>(var.basetype);
bool ssbo = var.storage == StorageClassStorageBuffer ||
meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
if (ssbo)
SPIRV_CROSS_THROW("SSBOs not supported in legacy targets.");
// We're emitting the push constant block as a regular struct, so disable the block qualifier temporarily.
// Otherwise, we will end up emitting layout() qualifiers on naked structs which is not allowed.
auto &block_flags = meta[type.self].decoration.decoration_flags;
auto &block_flags = ir.meta[type.self].decoration.decoration_flags;
bool block_flag = block_flags.get(DecorationBlock);
block_flags.clear(DecorationBlock);
emit_struct(type);
@ -1470,9 +1470,9 @@ void CompilerGLSL::emit_buffer_block_native(const SPIRVariable &var)
{
auto &type = get<SPIRType>(var.basetype);
Bitset flags = get_buffer_block_flags(var);
Bitset flags = ir.get_buffer_block_flags(var);
bool ssbo = var.storage == StorageClassStorageBuffer ||
meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
bool is_restrict = ssbo && flags.get(DecorationRestrict);
bool is_writeonly = ssbo && flags.get(DecorationNonReadable);
bool is_readonly = ssbo && flags.get(DecorationNonWritable);
@ -1485,7 +1485,7 @@ void CompilerGLSL::emit_buffer_block_native(const SPIRVariable &var)
// Shaders never use the block by interface name, so we don't
// have to track this other than updating name caches.
if (meta[type.self].decoration.alias.empty() || block_namespace.find(buffer_name) != end(block_namespace))
if (ir.meta[type.self].decoration.alias.empty() || block_namespace.find(buffer_name) != end(block_namespace))
buffer_name = get_block_fallback_name(var.self);
// Make sure we get something unique.
@ -1540,7 +1540,7 @@ void CompilerGLSL::emit_buffer_block_flattened(const SPIRVariable &var)
if (basic_type != SPIRType::Float && basic_type != SPIRType::Int && basic_type != SPIRType::UInt)
SPIRV_CROSS_THROW("Basic types in a flattened UBO must be float, int or uint.");
auto flags = get_buffer_block_flags(var);
auto flags = ir.get_buffer_block_flags(var);
statement("uniform ", flags_to_precision_qualifiers_glsl(tmp, flags), type_to_glsl(tmp), " ", buffer_name, "[",
buffer_size, "];");
}
@ -1576,9 +1576,9 @@ void CompilerGLSL::emit_flattened_io_block(const SPIRVariable &var, const char *
if (!type.array.empty())
SPIRV_CROSS_THROW("Array of varying structs cannot be flattened to legacy-compatible varyings.");
auto old_flags = meta[type.self].decoration.decoration_flags;
auto old_flags = ir.meta[type.self].decoration.decoration_flags;
// Emit the members as if they are part of a block to get all qualifiers.
meta[type.self].decoration.decoration_flags.set(DecorationBlock);
ir.meta[type.self].decoration.decoration_flags.set(DecorationBlock);
type.member_name_cache.clear();
@ -1604,7 +1604,7 @@ void CompilerGLSL::emit_flattened_io_block(const SPIRVariable &var, const char *
i++;
}
meta[type.self].decoration.decoration_flags = old_flags;
ir.meta[type.self].decoration.decoration_flags = old_flags;
// Treat this variable as flattened from now on.
flattened_structs.insert(var.self);
@ -1615,7 +1615,7 @@ void CompilerGLSL::emit_interface_block(const SPIRVariable &var)
auto &type = get<SPIRType>(var.basetype);
// Either make it plain in/out or in/out blocks depending on what shader is doing ...
bool block = meta[type.self].decoration.decoration_flags.get(DecorationBlock);
bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock);
const char *qual = to_storage_qualifiers_glsl(var);
if (block)
@ -1800,14 +1800,14 @@ void CompilerGLSL::replace_illegal_names()
};
// clang-format on
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
if (!is_hidden_variable(var))
{
auto &m = meta[var.self].decoration;
auto &m = ir.meta[var.self].decoration;
if (m.alias.compare(0, 3, "gl_") == 0 || keywords.find(m.alias) != end(keywords))
m.alias = join("_", m.alias);
}
@ -1817,7 +1817,7 @@ void CompilerGLSL::replace_illegal_names()
void CompilerGLSL::replace_fragment_output(SPIRVariable &var)
{
auto &m = meta[var.self].decoration;
auto &m = ir.meta[var.self].decoration;
uint32_t location = 0;
if (m.decoration_flags.get(DecorationLocation))
location = m.location;
@ -1855,7 +1855,7 @@ void CompilerGLSL::replace_fragment_output(SPIRVariable &var)
void CompilerGLSL::replace_fragment_outputs()
{
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
@ -1925,7 +1925,7 @@ void CompilerGLSL::emit_pls()
void CompilerGLSL::fixup_image_load_store_access()
{
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() != TypeVariable)
continue;
@ -1938,7 +1938,7 @@ void CompilerGLSL::fixup_image_load_store_access()
// Solve this by making the image access as restricted as possible and loosen up if we need to.
// If any no-read/no-write flags are actually set, assume that the compiler knows what it's doing.
auto &flags = meta.at(var).decoration.decoration_flags;
auto &flags = ir.meta.at(var).decoration.decoration_flags;
if (!flags.get(DecorationNonWritable) && !flags.get(DecorationNonReadable))
{
flags.set(DecorationNonWritable);
@ -1961,7 +1961,7 @@ void CompilerGLSL::emit_declared_builtin_block(StorageClass storage, ExecutionMo
uint32_t cull_distance_size = 0;
uint32_t clip_distance_size = 0;
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() != TypeVariable)
continue;
@ -1974,7 +1974,7 @@ void CompilerGLSL::emit_declared_builtin_block(StorageClass storage, ExecutionMo
if (var.storage == storage && block && is_builtin_variable(var))
{
uint32_t index = 0;
for (auto &m : meta[type.self].members)
for (auto &m : ir.meta[type.self].members)
{
if (m.builtin)
{
@ -1990,7 +1990,7 @@ void CompilerGLSL::emit_declared_builtin_block(StorageClass storage, ExecutionMo
else if (var.storage == storage && !block && is_builtin_variable(var))
{
// While we're at it, collect all declared global builtins (HLSL mostly ...).
auto &m = meta[var.self].decoration;
auto &m = ir.meta[var.self].decoration;
if (m.builtin)
{
global_builtins.set(m.builtin_type);
@ -2062,7 +2062,7 @@ void CompilerGLSL::emit_declared_builtin_block(StorageClass storage, ExecutionMo
void CompilerGLSL::declare_undefined_values()
{
bool emitted = false;
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() != TypeUndef)
continue;
@ -2138,7 +2138,7 @@ void CompilerGLSL::emit_resources()
//
// TODO: If we have the fringe case that we create a spec constant which depends on a struct type,
// we'll have to deal with that, but there's currently no known way to express that.
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeConstant)
{
@ -2165,14 +2165,14 @@ void CompilerGLSL::emit_resources()
// Output all basic struct types which are not Block or BufferBlock as these are declared inplace
// when such variables are instantiated.
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeType)
{
auto &type = id.get<SPIRType>();
if (type.basetype == SPIRType::Struct && type.array.empty() && !type.pointer &&
(!meta[type.self].decoration.decoration_flags.get(DecorationBlock) &&
!meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock)))
(!ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) &&
!ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock)))
{
emit_struct(type);
}
@ -2180,7 +2180,7 @@ void CompilerGLSL::emit_resources()
}
// Output UBOs and SSBOs
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
@ -2188,8 +2188,8 @@ void CompilerGLSL::emit_resources()
auto &type = get<SPIRType>(var.basetype);
bool is_block_storage = type.storage == StorageClassStorageBuffer || type.storage == StorageClassUniform;
bool has_block_flags = meta[type.self].decoration.decoration_flags.get(DecorationBlock) ||
meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
bool has_block_flags = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) ||
ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
if (var.storage != StorageClassFunction && type.pointer && is_block_storage && !is_hidden_variable(var) &&
has_block_flags)
@ -2200,7 +2200,7 @@ void CompilerGLSL::emit_resources()
}
// Output push constant blocks
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
@ -2217,7 +2217,7 @@ void CompilerGLSL::emit_resources()
bool skip_separate_image_sampler = !combined_image_samplers.empty() || !options.vulkan_semantics;
// Output Uniform Constants (values, samplers, images, etc).
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
@ -2250,7 +2250,7 @@ void CompilerGLSL::emit_resources()
emitted = false;
// Output in/out interfaces.
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
@ -2269,7 +2269,7 @@ void CompilerGLSL::emit_resources()
// For gl_InstanceIndex emulation on GLES, the API user needs to
// supply this uniform.
if (options.vertex.support_nonzero_base_instance &&
meta[var.self].decoration.builtin_type == BuiltInInstanceIndex && !options.vulkan_semantics)
ir.meta[var.self].decoration.builtin_type == BuiltInInstanceIndex && !options.vulkan_semantics)
{
statement("uniform int SPIRV_Cross_BaseInstance;");
emitted = true;
@ -2438,7 +2438,7 @@ string CompilerGLSL::to_expression(uint32_t id)
if (itr != end(invalid_expressions))
handle_invalid_expression(id);
if (ids[id].get_type() == TypeExpression)
if (ir.ids[id].get_type() == TypeExpression)
{
// We might have a more complex chain of dependencies.
// A possible scenario is that we
@ -2459,7 +2459,7 @@ string CompilerGLSL::to_expression(uint32_t id)
track_expression_read(id);
switch (ids[id].get_type())
switch (ir.ids[id].get_type())
{
case TypeExpression:
{
@ -2491,7 +2491,7 @@ string CompilerGLSL::to_expression(uint32_t id)
auto &type = get<SPIRType>(c.constant_type);
// WorkGroupSize may be a constant.
auto &dec = meta[c.self].decoration;
auto &dec = ir.meta[c.self].decoration;
if (dec.builtin)
return builtin_to_glsl(dec.builtin_type, StorageClassGeneric);
else if (c.specialization && options.vulkan_semantics)
@ -2530,7 +2530,7 @@ string CompilerGLSL::to_expression(uint32_t id)
}
else
{
auto &dec = meta[var.self].decoration;
auto &dec = ir.meta[var.self].decoration;
if (dec.builtin)
return builtin_to_glsl(dec.builtin_type, var.storage);
else
@ -3270,7 +3270,7 @@ string CompilerGLSL::constant_expression_vector(const SPIRConstant &c, uint32_t
string CompilerGLSL::declare_temporary(uint32_t result_type, uint32_t result_id)
{
auto &type = get<SPIRType>(result_type);
auto flags = meta[result_id].decoration.decoration_flags;
auto flags = ir.meta[result_id].decoration.decoration_flags;
// If we're declaring temporaries inside continue blocks,
// we must declare the temporary in the loop header so that the continue block can avoid declaring new variables.
@ -3863,13 +3863,10 @@ void CompilerGLSL::emit_sampled_image_op(uint32_t result_type, uint32_t result_i
void CompilerGLSL::emit_texture_op(const Instruction &i)
{
auto ops = stream(i);
auto *ops = stream(i);
auto op = static_cast<Op>(i.op);
uint32_t length = i.length;
if (i.offset + length > spirv.size())
SPIRV_CROSS_THROW("Compiler::parse() opcode out of range.");
vector<uint32_t> inherited_expressions;
uint32_t result_type = ops[0];
@ -4365,7 +4362,7 @@ void CompilerGLSL::emit_glsl_op(uint32_t result_type, uint32_t id, uint32_t eop,
{
forced_temporaries.insert(id);
auto &type = get<SPIRType>(result_type);
auto flags = meta[id].decoration.decoration_flags;
auto flags = ir.meta[id].decoration.decoration_flags;
statement(flags_to_precision_qualifiers_glsl(type, flags), variable_decl(type, to_name(id)), ";");
set<SPIRExpression>(id, to_name(id), result_type, true);
@ -4488,7 +4485,7 @@ void CompilerGLSL::emit_glsl_op(uint32_t result_type, uint32_t id, uint32_t eop,
{
forced_temporaries.insert(id);
auto &type = get<SPIRType>(result_type);
auto flags = meta[id].decoration.decoration_flags;
auto flags = ir.meta[id].decoration.decoration_flags;
statement(flags_to_precision_qualifiers_glsl(type, flags), variable_decl(type, to_name(id)), ";");
set<SPIRExpression>(id, to_name(id), result_type, true);
@ -5306,7 +5303,7 @@ string CompilerGLSL::access_chain_internal(uint32_t base, const uint32_t *indice
// but HLSL seems to just emit straight arrays here.
// We must pretend this access goes through gl_in/gl_out arrays
// to be able to access certain builtins as arrays.
auto builtin = meta[base].decoration.builtin_type;
auto builtin = ir.meta[base].decoration.builtin_type;
switch (builtin)
{
// case BuiltInCullDistance: // These are already arrays, need to figure out rules for these in tess/geom.
@ -5426,7 +5423,7 @@ string CompilerGLSL::access_chain_internal(uint32_t base, const uint32_t *indice
expr += ".";
expr += index_to_swizzle(index);
}
else if (ids[index].get_type() == TypeConstant && !is_packed)
else if (ir.ids[index].get_type() == TypeConstant && !is_packed)
{
auto &c = get<SPIRConstant>(index);
expr += ".";
@ -6486,7 +6483,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
splat = in_type->vecsize == 1 && in_type->columns == 1 && !composite && backend.use_constructor_splatting;
swizzle_splat = in_type->vecsize == 1 && in_type->columns == 1 && backend.can_swizzle_scalar;
if (ids[elems[0]].get_type() == TypeConstant && !type_is_floating_point(*in_type))
if (ir.ids[elems[0]].get_type() == TypeConstant && !type_is_floating_point(*in_type))
{
// Cannot swizzle literal integers as a special case.
swizzle_splat = false;
@ -6519,7 +6516,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
// We cannot construct array of arrays because we cannot treat the inputs
// as value types. Need to declare the array-of-arrays, and copy in elements one by one.
forced_temporaries.insert(id);
auto flags = meta[id].decoration.decoration_flags;
auto flags = ir.meta[id].decoration.decoration_flags;
statement(flags_to_precision_qualifiers_glsl(out_type, flags), variable_decl(out_type, to_name(id)), ";");
set<SPIRExpression>(id, to_name(id), result_type, true);
for (uint32_t i = 0; i < length; i++)
@ -6906,7 +6903,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
case OpBitwiseXor:
{
auto type = get<SPIRType>(ops[0]).basetype;
GLSL_BOP_CAST (^, type);
GLSL_BOP_CAST(^, type);
break;
}
@ -7580,7 +7577,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
auto *var = maybe_get_backing_variable(ops[2]);
if (var)
{
auto &flags = meta.at(var->self).decoration.decoration_flags;
auto &flags = ir.meta.at(var->self).decoration.decoration_flags;
if (flags.get(DecorationNonReadable))
{
flags.clear(DecorationNonReadable);
@ -7728,7 +7725,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
auto *var = maybe_get_backing_variable(ops[0]);
if (var)
{
auto &flags = meta.at(var->self).decoration.decoration_flags;
auto &flags = ir.meta.at(var->self).decoration.decoration_flags;
if (flags.get(DecorationNonWritable))
{
flags.clear(DecorationNonWritable);
@ -8225,7 +8222,7 @@ void CompilerGLSL::append_global_func_args(const SPIRFunction &func, uint32_t in
string CompilerGLSL::to_member_name(const SPIRType &type, uint32_t index)
{
auto &memb = meta[type.self].members;
auto &memb = ir.meta[type.self].members;
if (index < memb.size() && !memb[index].alias.empty())
return memb[index].alias;
else
@ -8239,7 +8236,7 @@ string CompilerGLSL::to_member_reference(const SPIRVariable *, const SPIRType &t
void CompilerGLSL::add_member_name(SPIRType &type, uint32_t index)
{
auto &memb = meta[type.self].members;
auto &memb = ir.meta[type.self].members;
if (index < memb.size() && !memb[index].alias.empty())
{
auto &name = memb[index].alias;
@ -8266,7 +8263,7 @@ bool CompilerGLSL::is_non_native_row_major_matrix(uint32_t id)
return false;
// Non-matrix or column-major matrix types do not need to be converted.
if (!meta[id].decoration.decoration_flags.get(DecorationRowMajor))
if (!ir.meta[id].decoration.decoration_flags.get(DecorationRowMajor))
return false;
// Only square row-major matrices can be converted at this time.
@ -8332,13 +8329,13 @@ void CompilerGLSL::emit_struct_member(const SPIRType &type, uint32_t member_type
auto &membertype = get<SPIRType>(member_type_id);
Bitset memberflags;
auto &memb = meta[type.self].members;
auto &memb = ir.meta[type.self].members;
if (index < memb.size())
memberflags = memb[index].decoration_flags;
string qualifiers;
bool is_block = meta[type.self].decoration.decoration_flags.get(DecorationBlock) ||
meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
bool is_block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) ||
ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
if (is_block)
qualifiers = to_interpolation_qualifiers(memberflags);
@ -8402,12 +8399,12 @@ const char *CompilerGLSL::flags_to_precision_qualifiers_glsl(const SPIRType &typ
const char *CompilerGLSL::to_precision_qualifiers_glsl(uint32_t id)
{
return flags_to_precision_qualifiers_glsl(expression_type(id), meta[id].decoration.decoration_flags);
return flags_to_precision_qualifiers_glsl(expression_type(id), ir.meta[id].decoration.decoration_flags);
}
string CompilerGLSL::to_qualifiers_glsl(uint32_t id)
{
auto flags = meta[id].decoration.decoration_flags;
auto flags = ir.meta[id].decoration.decoration_flags;
string res;
auto *var = maybe_get<SPIRVariable>(id);
@ -8469,13 +8466,13 @@ string CompilerGLSL::variable_decl(const SPIRVariable &variable)
if (variable.loop_variable && variable.static_expression)
{
uint32_t expr = variable.static_expression;
if (ids[expr].get_type() != TypeUndef)
if (ir.ids[expr].get_type() != TypeUndef)
res += join(" = ", to_expression(variable.static_expression));
}
else if (variable.initializer)
{
uint32_t expr = variable.initializer;
if (ids[expr].get_type() != TypeUndef)
if (ir.ids[expr].get_type() != TypeUndef)
res += join(" = ", to_initializer_expression(variable));
}
return res;
@ -8483,7 +8480,7 @@ string CompilerGLSL::variable_decl(const SPIRVariable &variable)
const char *CompilerGLSL::to_pls_qualifiers_glsl(const SPIRVariable &variable)
{
auto flags = meta[variable.self].decoration.decoration_flags;
auto flags = ir.meta[variable.self].decoration.decoration_flags;
if (flags.get(DecorationRelaxedPrecision))
return "mediump ";
else
@ -8838,7 +8835,7 @@ void CompilerGLSL::add_variable(unordered_set<string> &variables, string &name)
void CompilerGLSL::add_variable(unordered_set<string> &variables, uint32_t id)
{
auto &name = meta[id].decoration.alias;
auto &name = ir.meta[id].decoration.alias;
add_variable(variables, name);
}
@ -8883,7 +8880,7 @@ void CompilerGLSL::flatten_buffer_block(uint32_t id)
auto &var = get<SPIRVariable>(id);
auto &type = get<SPIRType>(var.basetype);
auto name = to_name(type.self, false);
auto flags = meta.at(type.self).decoration.decoration_flags;
auto flags = ir.meta.at(type.self).decoration.decoration_flags;
if (!type.array.empty())
SPIRV_CROSS_THROW(name + " is an array of UBOs.");
@ -8908,7 +8905,7 @@ bool CompilerGLSL::check_atomic_image(uint32_t id)
auto *var = maybe_get_backing_variable(id);
if (var)
{
auto &flags = meta.at(var->self).decoration.decoration_flags;
auto &flags = ir.meta.at(var->self).decoration.decoration_flags;
if (flags.get(DecorationNonWritable) || flags.get(DecorationNonReadable))
{
flags.clear(DecorationNonWritable);
@ -8978,7 +8975,7 @@ void CompilerGLSL::add_function_overload(const SPIRFunction &func)
void CompilerGLSL::emit_function_prototype(SPIRFunction &func, const Bitset &return_flags)
{
if (func.self != entry_point)
if (func.self != ir.default_entry_point)
add_function_overload(func);
// Avoid shadow declarations.
@ -8992,7 +8989,7 @@ void CompilerGLSL::emit_function_prototype(SPIRFunction &func, const Bitset &ret
decl += type_to_array_glsl(type);
decl += " ";
if (func.self == entry_point)
if (func.self == ir.default_entry_point)
{
decl += "main";
processing_entry_point = true;
@ -9064,7 +9061,7 @@ void CompilerGLSL::emit_function(SPIRFunction &func, const Bitset &return_flags)
{
// Recursively emit functions which are called.
uint32_t id = ops[2];
emit_function(get<SPIRFunction>(id), meta[ops[1]].decoration.decoration_flags);
emit_function(get<SPIRFunction>(id), ir.meta[ops[1]].decoration.decoration_flags);
}
}
}
@ -9072,7 +9069,7 @@ void CompilerGLSL::emit_function(SPIRFunction &func, const Bitset &return_flags)
emit_function_prototype(func, return_flags);
begin_scope();
if (func.self == entry_point)
if (func.self == ir.default_entry_point)
emit_entry_point_declarations();
current_function = &func;
@ -9272,7 +9269,7 @@ void CompilerGLSL::branch(uint32_t from, uint32_t to)
flush_all_active_variables();
// This is only a continue if we branch to our loop dominator.
if (loop_blocks.find(to) != end(loop_blocks) && get<SPIRBlock>(from).loop_dominator == to)
if ((ir.block_meta[to] & ParsedIR::BLOCK_META_LOOP_HEADER_BIT) != 0 && get<SPIRBlock>(from).loop_dominator == to)
{
// This can happen if we had a complex continue block which was emitted.
// Once the continue block tries to branch to the loop header, just emit continue;
@ -9426,7 +9423,7 @@ string CompilerGLSL::emit_continue_block(uint32_t continue_block)
redirect_statement = &statements;
// Stamp out all blocks one after each other.
while (loop_blocks.find(block->self) == end(loop_blocks))
while ((ir.block_meta[block->self] & ParsedIR::BLOCK_META_LOOP_HEADER_BIT) == 0)
{
propagate_loop_dominators(*block);
// Write out all instructions we have in this block.
@ -9488,7 +9485,7 @@ string CompilerGLSL::emit_for_loop_initializers(const SPIRBlock &block)
// Sometimes loop variables are initialized with OpUndef, but we can just declare
// a plain variable without initializer in this case.
if (expr == 0 || ids[expr].get_type() == TypeUndef)
if (expr == 0 || ir.ids[expr].get_type() == TypeUndef)
missing_initializers++;
}
@ -9511,7 +9508,7 @@ string CompilerGLSL::emit_for_loop_initializers(const SPIRBlock &block)
for (auto &loop_var : block.loop_variables)
{
uint32_t static_expr = get<SPIRVariable>(loop_var).static_expression;
if (static_expr == 0 || ids[static_expr].get_type() == TypeUndef)
if (static_expr == 0 || ir.ids[static_expr].get_type() == TypeUndef)
{
statement(variable_decl(get<SPIRVariable>(loop_var)), ";");
}
@ -9546,7 +9543,7 @@ bool CompilerGLSL::for_loop_initializers_are_same_type(const SPIRBlock &block)
{
// Don't care about uninitialized variables as they will not be part of the initializers.
uint32_t expr = get<SPIRVariable>(var).static_expression;
if (expr == 0 || ids[expr].get_type() == TypeUndef)
if (expr == 0 || ir.ids[expr].get_type() == TypeUndef)
continue;
if (expected == 0)
@ -9711,7 +9708,7 @@ void CompilerGLSL::emit_hoisted_temporaries(vector<pair<uint32_t, uint32_t>> &te
for (auto &tmp : temporaries)
{
add_local_variable_name(tmp.second);
auto flags = meta[tmp.second].decoration.decoration_flags;
auto flags = ir.meta[tmp.second].decoration.decoration_flags;
auto &type = get<SPIRType>(tmp.first);
statement(flags_to_precision_qualifiers_glsl(type, flags), variable_decl(type, to_name(tmp.second)), ";");
@ -9971,7 +9968,7 @@ void CompilerGLSL::emit_block_chain(SPIRBlock &block)
{
// If we cannot return arrays, we will have a special out argument we can write to instead.
// The backend is responsible for setting this up, and redirecting the return values as appropriate.
if (ids.at(block.return_value).get_type() != TypeUndef)
if (ir.ids.at(block.return_value).get_type() != TypeUndef)
emit_array_copy("SPIRV_Cross_return_value", block.return_value);
if (!block_is_outside_flow_control_from_block(get<SPIRBlock>(current_function->entry_block), block) ||
@ -9983,7 +9980,7 @@ void CompilerGLSL::emit_block_chain(SPIRBlock &block)
else
{
// OpReturnValue can return Undef, so don't emit anything for this case.
if (ids.at(block.return_value).get_type() != TypeUndef)
if (ir.ids.at(block.return_value).get_type() != TypeUndef)
statement("return ", to_expression(block.return_value), ";");
}
}

View File

@ -127,14 +127,26 @@ public:
remap_pls_variables();
}
CompilerGLSL(std::vector<uint32_t> spirv_)
explicit CompilerGLSL(std::vector<uint32_t> spirv_)
: Compiler(move(spirv_))
{
init();
}
CompilerGLSL(const uint32_t *ir, size_t word_count)
: Compiler(ir, word_count)
CompilerGLSL(const uint32_t *ir_, size_t word_count)
: Compiler(ir_, word_count)
{
init();
}
explicit CompilerGLSL(const ParsedIR &ir_)
: Compiler(ir_)
{
init();
}
explicit CompilerGLSL(ParsedIR &&ir_)
: Compiler(std::move(ir_))
{
init();
}
@ -586,10 +598,10 @@ protected:
private:
void init()
{
if (source.known)
if (ir.source.known)
{
options.es = source.es;
options.version = source.version;
options.es = ir.source.es;
options.version = ir.source.version;
}
}
};

View File

@ -472,7 +472,7 @@ void CompilerHLSL::emit_interface_block_globally(const SPIRVariable &var)
// The global copies of I/O variables should not contain interpolation qualifiers.
// These are emitted inside the interface structs.
auto &flags = meta[var.self].decoration.decoration_flags;
auto &flags = ir.meta[var.self].decoration.decoration_flags;
auto old_flags = flags;
flags.reset();
statement("static ", variable_decl(var), ";");
@ -806,7 +806,7 @@ void CompilerHLSL::emit_interface_block_in_struct(const SPIRVariable &var, unord
bool need_matrix_unroll = var.storage == StorageClassInput && execution.model == ExecutionModelVertex;
auto &m = meta[var.self].decoration;
auto &m = ir.meta[var.self].decoration;
auto name = to_name(var.self);
if (use_location_number)
{
@ -992,7 +992,7 @@ void CompilerHLSL::emit_composite_constants()
// global constants directly.
bool emitted = false;
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeConstant)
{
@ -1020,7 +1020,7 @@ void CompilerHLSL::emit_specialization_constants()
SpecializationConstant wg_x, wg_y, wg_z;
uint32_t workgroup_size_id = get_work_group_size_specialization_constants(wg_x, wg_y, wg_z);
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeConstant)
{
@ -1063,14 +1063,14 @@ void CompilerHLSL::replace_illegal_names()
"line", "linear", "matrix", "point", "row_major", "sampler",
};
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
if (!is_hidden_variable(var))
{
auto &m = meta[var.self].decoration;
auto &m = ir.meta[var.self].decoration;
if (keywords.find(m.alias) != end(keywords))
m.alias = join("_", m.alias);
}
@ -1090,14 +1090,14 @@ void CompilerHLSL::emit_resources()
// Output all basic struct types which are not Block or BufferBlock as these are declared inplace
// when such variables are instantiated.
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeType)
{
auto &type = id.get<SPIRType>();
if (type.basetype == SPIRType::Struct && type.array.empty() && !type.pointer &&
(!meta[type.self].decoration.decoration_flags.get(DecorationBlock) &&
!meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock)))
(!ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) &&
!ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock)))
{
emit_struct(type);
}
@ -1109,7 +1109,7 @@ void CompilerHLSL::emit_resources()
bool emitted = false;
// Output UBOs and SSBOs
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
@ -1117,8 +1117,8 @@ void CompilerHLSL::emit_resources()
auto &type = get<SPIRType>(var.basetype);
bool is_block_storage = type.storage == StorageClassStorageBuffer || type.storage == StorageClassUniform;
bool has_block_flags = meta[type.self].decoration.decoration_flags.get(DecorationBlock) ||
meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
bool has_block_flags = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) ||
ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
if (var.storage != StorageClassFunction && type.pointer && is_block_storage && !is_hidden_variable(var) &&
has_block_flags)
@ -1130,7 +1130,7 @@ void CompilerHLSL::emit_resources()
}
// Output push constant blocks
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
@ -1152,7 +1152,7 @@ void CompilerHLSL::emit_resources()
}
// Output Uniform Constants (values, samplers, images, etc).
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
@ -1176,13 +1176,13 @@ void CompilerHLSL::emit_resources()
// Emit builtin input and output variables here.
emit_builtin_variables();
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
bool block = meta[type.self].decoration.decoration_flags.get(DecorationBlock);
bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock);
// Do not emit I/O blocks here.
// I/O blocks can be arrayed, so we must deal with them separately to support geometry shaders
@ -1208,13 +1208,13 @@ void CompilerHLSL::emit_resources()
unordered_set<uint32_t> active_outputs;
vector<SPIRVariable *> input_variables;
vector<SPIRVariable *> output_variables;
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
bool block = meta[type.self].decoration.decoration_flags.get(DecorationBlock);
bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock);
if (var.storage != StorageClassInput && var.storage != StorageClassOutput)
continue;
@ -1801,13 +1801,13 @@ void CompilerHLSL::emit_struct_member(const SPIRType &type, uint32_t member_type
auto &membertype = get<SPIRType>(member_type_id);
Bitset memberflags;
auto &memb = meta[type.self].members;
auto &memb = ir.meta[type.self].members;
if (index < memb.size())
memberflags = memb[index].decoration_flags;
string qualifiers;
bool is_block = meta[type.self].decoration.decoration_flags.get(DecorationBlock) ||
meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
bool is_block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) ||
ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
if (is_block)
qualifiers = to_interpolation_qualifiers(memberflags);
@ -1838,7 +1838,7 @@ void CompilerHLSL::emit_buffer_block(const SPIRVariable &var)
if (is_uav)
{
Bitset flags = get_buffer_block_flags(var);
Bitset flags = ir.get_buffer_block_flags(var);
bool is_readonly = flags.get(DecorationNonWritable);
bool is_coherent = flags.get(DecorationCoherent);
add_resource_name(var.self);
@ -1918,7 +1918,7 @@ void CompilerHLSL::emit_push_constant_block(const SPIRVariable &var)
flattened_structs.insert(var.self);
type.member_name_cache.clear();
add_resource_name(var.self);
auto &memb = meta[type.self].members;
auto &memb = ir.meta[type.self].members;
statement("cbuffer SPIRV_CROSS_RootConstant_", to_name(var.self),
to_resource_register('b', layout.binding, layout.space));
@ -1994,7 +1994,7 @@ string CompilerHLSL::to_func_call_arg(uint32_t id)
void CompilerHLSL::emit_function_prototype(SPIRFunction &func, const Bitset &return_flags)
{
if (func.self != entry_point)
if (func.self != ir.default_entry_point)
add_function_overload(func);
auto &execution = get_entry_point();
@ -2016,7 +2016,7 @@ void CompilerHLSL::emit_function_prototype(SPIRFunction &func, const Bitset &ret
decl = "void ";
}
if (func.self == entry_point)
if (func.self == ir.default_entry_point)
{
if (execution.model == ExecutionModelVertex)
decl += "vert_main";
@ -2087,13 +2087,13 @@ void CompilerHLSL::emit_hlsl_entry_point()
arguments.push_back("SPIRV_Cross_Input stage_input");
// Add I/O blocks as separate arguments with appropriate storage qualifier.
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
bool block = meta[type.self].decoration.decoration_flags.get(DecorationBlock);
bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock);
if (var.storage != StorageClassInput && var.storage != StorageClassOutput)
continue;
@ -2257,13 +2257,13 @@ void CompilerHLSL::emit_hlsl_entry_point()
});
// Copy from stage input struct to globals.
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
bool block = meta[type.self].decoration.decoration_flags.get(DecorationBlock);
bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock);
if (var.storage != StorageClassInput)
continue;
@ -2307,13 +2307,13 @@ void CompilerHLSL::emit_hlsl_entry_point()
SPIRV_CROSS_THROW("Unsupported shader stage.");
// Copy block outputs.
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
bool block = meta[type.self].decoration.decoration_flags.get(DecorationBlock);
bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock);
if (var.storage != StorageClassOutput)
continue;
@ -2361,13 +2361,13 @@ void CompilerHLSL::emit_hlsl_entry_point()
}
});
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
bool block = meta[type.self].decoration.decoration_flags.get(DecorationBlock);
bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock);
if (var.storage != StorageClassOutput)
continue;
@ -2421,13 +2421,10 @@ void CompilerHLSL::emit_fixup()
void CompilerHLSL::emit_texture_op(const Instruction &i)
{
auto ops = stream(i);
auto *ops = stream(i);
auto op = static_cast<Op>(i.op);
uint32_t length = i.length;
if (i.offset + length > spirv.size())
SPIRV_CROSS_THROW("Compiler::parse() opcode out of range.");
vector<uint32_t> inherited_expressions;
uint32_t result_type = ops[0];
@ -2886,7 +2883,7 @@ string CompilerHLSL::to_resource_binding(const SPIRVariable &var)
{
if (has_decoration(type.self, DecorationBufferBlock))
{
Bitset flags = get_buffer_block_flags(var);
Bitset flags = ir.get_buffer_block_flags(var);
bool is_readonly = flags.get(DecorationNonWritable);
space = is_readonly ? 't' : 'u'; // UAV
}
@ -4550,7 +4547,7 @@ uint32_t CompilerHLSL::remap_num_workgroups_builtin()
return 0;
// Create a new, fake UBO.
uint32_t offset = increase_bound_by(4);
uint32_t offset = ir.increase_bound_by(4);
uint32_t uint_type_id = offset;
uint32_t block_type_id = offset + 1;
@ -4582,7 +4579,7 @@ uint32_t CompilerHLSL::remap_num_workgroups_builtin()
ptr_type.self = block_type_id;
set<SPIRVariable>(variable_id, block_pointer_type_id, StorageClassUniform);
meta[variable_id].decoration.alias = "SPIRV_Cross_NumWorkgroups";
ir.meta[variable_id].decoration.alias = "SPIRV_Cross_NumWorkgroups";
num_workgroups_builtin = variable_id;
return variable_id;
@ -4635,7 +4632,7 @@ string CompilerHLSL::compile()
emit_header();
emit_resources();
emit_function(get<SPIRFunction>(entry_point), Bitset());
emit_function(get<SPIRFunction>(ir.default_entry_point), Bitset());
emit_hlsl_entry_point();
pass_count++;

View File

@ -56,13 +56,23 @@ public:
bool point_coord_compat = false;
};
CompilerHLSL(std::vector<uint32_t> spirv_)
explicit CompilerHLSL(std::vector<uint32_t> spirv_)
: CompilerGLSL(move(spirv_))
{
}
CompilerHLSL(const uint32_t *ir, size_t size)
: CompilerGLSL(ir, size)
CompilerHLSL(const uint32_t *ir_, size_t size)
: CompilerGLSL(ir_, size)
{
}
explicit CompilerHLSL(const ParsedIR &ir_)
: CompilerGLSL(ir_)
{
}
explicit CompilerHLSL(ParsedIR &&ir_)
: CompilerGLSL(std::move(ir_))
{
}

View File

@ -43,9 +43,35 @@ CompilerMSL::CompilerMSL(vector<uint32_t> spirv_, vector<MSLVertexAttr> *p_vtx_a
resource_bindings.push_back(&rb);
}
CompilerMSL::CompilerMSL(const uint32_t *ir, size_t word_count, MSLVertexAttr *p_vtx_attrs, size_t vtx_attrs_count,
CompilerMSL::CompilerMSL(const uint32_t *ir_, size_t word_count, MSLVertexAttr *p_vtx_attrs, size_t vtx_attrs_count,
MSLResourceBinding *p_res_bindings, size_t res_bindings_count)
: CompilerGLSL(ir, word_count)
: CompilerGLSL(ir_, word_count)
{
if (p_vtx_attrs)
for (size_t i = 0; i < vtx_attrs_count; i++)
vtx_attrs_by_location[p_vtx_attrs[i].location] = &p_vtx_attrs[i];
if (p_res_bindings)
for (size_t i = 0; i < res_bindings_count; i++)
resource_bindings.push_back(&p_res_bindings[i]);
}
CompilerMSL::CompilerMSL(const ParsedIR &ir_, MSLVertexAttr *p_vtx_attrs, size_t vtx_attrs_count,
MSLResourceBinding *p_res_bindings, size_t res_bindings_count)
: CompilerGLSL(ir_)
{
if (p_vtx_attrs)
for (size_t i = 0; i < vtx_attrs_count; i++)
vtx_attrs_by_location[p_vtx_attrs[i].location] = &p_vtx_attrs[i];
if (p_res_bindings)
for (size_t i = 0; i < res_bindings_count; i++)
resource_bindings.push_back(&p_res_bindings[i]);
}
CompilerMSL::CompilerMSL(ParsedIR &&ir_, MSLVertexAttr *p_vtx_attrs, size_t vtx_attrs_count,
MSLResourceBinding *p_res_bindings, size_t res_bindings_count)
: CompilerGLSL(std::move(ir_))
{
if (p_vtx_attrs)
for (size_t i = 0; i < vtx_attrs_count; i++)
@ -64,23 +90,23 @@ void CompilerMSL::build_implicit_builtins()
bool has_frag_coord = false;
bool has_sample_id = false;
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() != TypeVariable)
continue;
auto &var = id.get<SPIRVariable>();
if (need_subpass_input && var.storage == StorageClassInput && meta[var.self].decoration.builtin &&
meta[var.self].decoration.builtin_type == BuiltInFragCoord)
if (need_subpass_input && var.storage == StorageClassInput && ir.meta[var.self].decoration.builtin &&
ir.meta[var.self].decoration.builtin_type == BuiltInFragCoord)
{
builtin_frag_coord_id = var.self;
has_frag_coord = true;
break;
}
if (need_sample_pos && var.storage == StorageClassInput && meta[var.self].decoration.builtin &&
meta[var.self].decoration.builtin_type == BuiltInSampleId)
if (need_sample_pos && var.storage == StorageClassInput && ir.meta[var.self].decoration.builtin &&
ir.meta[var.self].decoration.builtin_type == BuiltInSampleId)
{
builtin_sample_id_id = var.self;
has_sample_id = true;
@ -90,7 +116,7 @@ void CompilerMSL::build_implicit_builtins()
if (!has_frag_coord && need_subpass_input)
{
uint32_t offset = increase_bound_by(3);
uint32_t offset = ir.increase_bound_by(3);
uint32_t type_id = offset;
uint32_t type_ptr_id = offset + 1;
uint32_t var_id = offset + 2;
@ -117,7 +143,7 @@ void CompilerMSL::build_implicit_builtins()
if (!has_sample_id && need_sample_pos)
{
uint32_t offset = increase_bound_by(3);
uint32_t offset = ir.increase_bound_by(3);
uint32_t type_id = offset;
uint32_t type_ptr_id = offset + 1;
uint32_t var_id = offset + 2;
@ -144,7 +170,7 @@ void CompilerMSL::build_implicit_builtins()
if (msl_options.swizzle_texture_samples && has_sampled_images)
{
uint32_t offset = increase_bound_by(5);
uint32_t offset = ir.increase_bound_by(5);
uint32_t type_id = offset;
uint32_t type_arr_id = offset + 1;
uint32_t struct_id = offset + 2;
@ -439,7 +465,7 @@ string CompilerMSL::compile()
emit_specialization_constants();
emit_resources();
emit_custom_functions();
emit_function(get<SPIRFunction>(entry_point), Bitset());
emit_function(get<SPIRFunction>(ir.default_entry_point), Bitset());
pass_count++;
} while (force_recompile);
@ -477,7 +503,7 @@ string CompilerMSL::compile(MSLConfiguration &msl_cfg, vector<MSLVertexAttr> *p_
void CompilerMSL::preprocess_op_codes()
{
OpCodePreprocessor preproc(*this);
traverse_all_reachable_opcodes(get<SPIRFunction>(entry_point), preproc);
traverse_all_reachable_opcodes(get<SPIRFunction>(ir.default_entry_point), preproc);
if (preproc.suppress_missing_prototypes)
add_pragma_line("#pragma clang diagnostic ignored \"-Wmissing-prototypes\"");
@ -497,7 +523,7 @@ void CompilerMSL::preprocess_op_codes()
// Non-constant variables cannot have global scope in Metal.
void CompilerMSL::localize_global_variables()
{
auto &entry_func = get<SPIRFunction>(entry_point);
auto &entry_func = get<SPIRFunction>(ir.default_entry_point);
auto iter = global_variables.begin();
while (iter != global_variables.end())
{
@ -517,7 +543,7 @@ void CompilerMSL::localize_global_variables()
// Turn off specialization of any constants that are used for array lengths.
void CompilerMSL::resolve_specialized_array_lengths()
{
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeConstant)
{
@ -534,7 +560,7 @@ void CompilerMSL::extract_global_variables_from_functions()
{
// Uniforms
unordered_set<uint32_t> global_var_ids;
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
@ -549,14 +575,14 @@ void CompilerMSL::extract_global_variables_from_functions()
}
// Local vars that are declared in the main function and accessed directly by a function
auto &entry_func = get<SPIRFunction>(entry_point);
auto &entry_func = get<SPIRFunction>(ir.default_entry_point);
for (auto &var : entry_func.local_variables)
if (get<SPIRVariable>(var).storage != StorageClassFunction)
global_var_ids.insert(var);
std::set<uint32_t> added_arg_ids;
unordered_set<uint32_t> processed_func_ids;
extract_global_variables_from_function(entry_point, added_arg_ids, global_var_ids, processed_func_ids);
extract_global_variables_from_function(ir.default_entry_point, added_arg_ids, global_var_ids, processed_func_ids);
}
// MSL does not support the use of global variables for shader input content.
@ -655,7 +681,7 @@ void CompilerMSL::extract_global_variables_from_function(uint32_t func_id, std::
function_global_vars[func_id] = added_arg_ids;
// Add the global variables as arguments to the function
if (func_id != entry_point)
if (func_id != ir.default_entry_point)
{
for (uint32_t arg_id : added_arg_ids)
{
@ -677,7 +703,7 @@ void CompilerMSL::extract_global_variables_from_function(uint32_t func_id, std::
if (is_builtin && has_active_builtin(builtin, var.storage))
{
// Add an arg variable with the same type and decorations as the member
uint32_t next_ids = increase_bound_by(2);
uint32_t next_ids = ir.increase_bound_by(2);
uint32_t ptr_type_id = next_ids + 0;
uint32_t var_id = next_ids + 1;
@ -690,20 +716,20 @@ void CompilerMSL::extract_global_variables_from_function(uint32_t func_id, std::
func.add_parameter(mbr_type_id, var_id, true);
set<SPIRVariable>(var_id, ptr_type_id, StorageClassFunction);
meta[var_id].decoration = meta[type_id].members[mbr_idx];
ir.meta[var_id].decoration = ir.meta[type_id].members[mbr_idx];
}
mbr_idx++;
}
}
else
{
uint32_t next_id = increase_bound_by(1);
uint32_t next_id = ir.increase_bound_by(1);
func.add_parameter(type_id, next_id, true);
set<SPIRVariable>(next_id, type_id, StorageClassFunction, 0, arg_id);
// Ensure the existing variable has a valid name and the new variable has all the same meta info
set_name(arg_id, ensure_valid_name(to_name(arg_id), "v"));
meta[next_id] = meta[arg_id];
ir.meta[next_id] = ir.meta[arg_id];
}
}
}
@ -713,7 +739,7 @@ void CompilerMSL::extract_global_variables_from_function(uint32_t func_id, std::
// that are recursively contained within the type referenced by that variable should be packed tightly.
void CompilerMSL::mark_packable_structs()
{
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
@ -778,7 +804,7 @@ uint32_t CompilerMSL::add_interface_block(StorageClass storage)
// Accumulate the variables that should appear in the interface struct
vector<SPIRVariable *> vars;
bool incl_builtins = (storage == StorageClassOutput);
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
@ -799,7 +825,7 @@ uint32_t CompilerMSL::add_interface_block(StorageClass storage)
// Add a new typed variable for this interface structure.
// The initializer expression is allocated here, but populated when the function
// declaration is emitted, because it is cleared after each compilation pass.
uint32_t next_id = increase_bound_by(3);
uint32_t next_id = ir.increase_bound_by(3);
uint32_t ib_type_id = next_id++;
auto &ib_type = set<SPIRType>(ib_type_id);
ib_type.basetype = SPIRType::Struct;
@ -827,7 +853,7 @@ uint32_t CompilerMSL::add_interface_block(StorageClass storage)
// Indicate the output var requires early initialization.
bool ep_should_return_output = !get_is_rasterization_disabled();
uint32_t rtn_id = ep_should_return_output ? ib_var_id : 0;
auto &entry_func = get<SPIRFunction>(entry_point);
auto &entry_func = get<SPIRFunction>(ir.default_entry_point);
entry_func.add_local_variable(ib_var_id);
for (auto &blk_id : entry_func.blocks)
{
@ -843,10 +869,10 @@ uint32_t CompilerMSL::add_interface_block(StorageClass storage)
break;
}
set_name(ib_type_id, to_name(entry_point) + "_" + ib_var_ref);
set_name(ib_type_id, to_name(ir.default_entry_point) + "_" + ib_var_ref);
set_name(ib_var_id, ib_var_ref);
auto &entry_func = get<SPIRFunction>(entry_point);
auto &entry_func = get<SPIRFunction>(ir.default_entry_point);
for (auto p_var : vars)
{
@ -1083,7 +1109,7 @@ uint32_t CompilerMSL::add_interface_block(StorageClass storage)
// Update the original variable reference to include the structure reference
string qual_var_name = ib_var_ref + "." + mbr_name;
meta[p_var->self].decoration.qualified_alias = qual_var_name;
ir.meta[p_var->self].decoration.qualified_alias = qual_var_name;
// Copy the variable location from the original variable to the member
if (get_decoration_bitset(p_var->self).get(DecorationLocation))
@ -1128,7 +1154,7 @@ uint32_t CompilerMSL::add_interface_block(StorageClass storage)
}
// Sort the members of the structure by their locations.
MemberSorter member_sorter(ib_type, meta[ib_type_id], MemberSorter::Location);
MemberSorter member_sorter(ib_type, ir.meta[ib_type_id], MemberSorter::Location);
member_sorter.sort();
return ib_var_id;
@ -1144,7 +1170,7 @@ uint32_t CompilerMSL::ensure_correct_builtin_type(uint32_t type_id, BuiltIn buil
if ((builtin == BuiltInSampleMask && is_array(type)) ||
((builtin == BuiltInLayer || builtin == BuiltInViewportIndex) && type.basetype != SPIRType::UInt))
{
uint32_t next_id = increase_bound_by(type.pointer ? 2 : 1);
uint32_t next_id = ir.increase_bound_by(type.pointer ? 2 : 1);
uint32_t base_type_id = next_id++;
auto &base_type = set<SPIRType>(base_type_id);
base_type.basetype = SPIRType::UInt;
@ -1175,7 +1201,7 @@ void CompilerMSL::align_struct(SPIRType &ib_type)
// Sort the members of the interface structure by their offset.
// They should already be sorted per SPIR-V spec anyway.
MemberSorter member_sorter(ib_type, meta[ib_type_id], MemberSorter::Offset);
MemberSorter member_sorter(ib_type, ir.meta[ib_type_id], MemberSorter::Offset);
member_sorter.sort();
uint32_t curr_offset;
@ -1800,7 +1826,7 @@ void CompilerMSL::emit_custom_functions()
void CompilerMSL::declare_undefined_values()
{
bool emitted = false;
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeUndef)
{
@ -1821,7 +1847,7 @@ void CompilerMSL::declare_constant_arrays()
// global constants directly, so we are able to use constants as variable expressions.
bool emitted = false;
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeConstant)
{
@ -1848,7 +1874,7 @@ void CompilerMSL::emit_resources()
// Output non-builtin interface structs. These include local function structs
// and structs nested within uniform and read-write buffers.
unordered_set<uint32_t> declared_structs;
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeType)
{
@ -1896,7 +1922,7 @@ void CompilerMSL::emit_specialization_constants()
uint32_t workgroup_size_id = get_work_group_size_specialization_constants(wg_x, wg_y, wg_z);
bool emitted = false;
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeConstant)
{
@ -2520,12 +2546,12 @@ void CompilerMSL::emit_array_copy(const string &lhs, uint32_t rhs_id)
// Unfortunately, we cannot template on address space in MSL,
// so explicit address space redirection it is ...
bool is_constant = false;
if (ids[rhs_id].get_type() == TypeConstant)
if (ir.ids[rhs_id].get_type() == TypeConstant)
{
is_constant = true;
}
else if (var && var->remapped_variable && var->statically_assigned &&
ids[var->static_expression].get_type() == TypeConstant)
ir.ids[var->static_expression].get_type() == TypeConstant)
{
is_constant = true;
}
@ -2551,7 +2577,7 @@ bool CompilerMSL::maybe_emit_array_assignment(uint32_t id_lhs, uint32_t id_rhs)
if (var && var->remapped_variable && var->statically_assigned)
return true;
if (ids[id_rhs].get_type() == TypeConstant && var && var->deferred_declaration)
if (ir.ids[id_rhs].get_type() == TypeConstant && var && var->deferred_declaration)
{
// Special case, if we end up declaring a variable when assigning the constant array,
// we can avoid the copy by directly assigning the constant expression.
@ -2818,13 +2844,13 @@ void CompilerMSL::emit_interface_block(uint32_t ib_var_id)
// If this is the entry point function, Metal-specific return value and function arguments are added.
void CompilerMSL::emit_function_prototype(SPIRFunction &func, const Bitset &)
{
if (func.self != entry_point)
if (func.self != ir.default_entry_point)
add_function_overload(func);
local_variable_names = resource_names;
string decl;
processing_entry_point = (func.self == entry_point);
processing_entry_point = (func.self == ir.default_entry_point);
auto &type = get<SPIRType>(func.return_type);
@ -2865,7 +2891,7 @@ void CompilerMSL::emit_function_prototype(SPIRFunction &func, const Bitset &)
{
auto &ed_var = get<SPIRVariable>(var_id);
if (!ed_var.initializer)
ed_var.initializer = increase_bound_by(1);
ed_var.initializer = ir.increase_bound_by(1);
set<SPIRExpression>(ed_var.initializer, "{}", ed_var.basetype, true);
}
@ -3271,7 +3297,7 @@ string CompilerMSL::round_fp_tex_coords(string tex_coords, bool coord_is_fp)
// The ID must be a scalar constant.
string CompilerMSL::to_component_argument(uint32_t id)
{
if (ids[id].get_type() != TypeConstant)
if (ir.ids[id].get_type() != TypeConstant)
{
SPIRV_CROSS_THROW("ID " + to_string(id) + " is not an OpConstant.");
return "component::x";
@ -3347,7 +3373,7 @@ bool CompilerMSL::is_non_native_row_major_matrix(uint32_t id)
return false;
// Non-matrix or column-major matrix types do not need to be converted.
if (!meta[id].decoration.decoration_flags.get(DecorationRowMajor))
if (!ir.meta[id].decoration.decoration_flags.get(DecorationRowMajor))
return false;
// Generate a function that will swap matrix elements from row-major to column-major.
@ -3682,7 +3708,7 @@ string CompilerMSL::member_attribute_qualifier(const SPIRType &type, uint32_t in
// index as the location.
uint32_t CompilerMSL::get_ordered_member_location(uint32_t type_id, uint32_t index, uint32_t *comp)
{
auto &m = meta.at(type_id);
auto &m = ir.meta.at(type_id);
if (index < m.members.size())
{
auto &dec = m.members[index];
@ -3754,7 +3780,7 @@ string CompilerMSL::get_argument_address_space(const SPIRVariable &argument)
case StorageClassStorageBuffer:
{
bool readonly = get_buffer_block_flags(argument).get(DecorationNonWritable);
bool readonly = ir.get_buffer_block_flags(argument).get(DecorationNonWritable);
return readonly ? "const device" : "device";
}
@ -3766,7 +3792,7 @@ string CompilerMSL::get_argument_address_space(const SPIRVariable &argument)
bool ssbo = has_decoration(type.self, DecorationBufferBlock);
if (ssbo)
{
bool readonly = get_buffer_block_flags(argument).get(DecorationNonWritable);
bool readonly = ir.get_buffer_block_flags(argument).get(DecorationNonWritable);
return readonly ? "const device" : "device";
}
else
@ -3816,7 +3842,7 @@ string CompilerMSL::entry_point_args(bool append_comma)
vector<Resource> resources;
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
@ -3865,7 +3891,7 @@ string CompilerMSL::entry_point_args(bool append_comma)
{
case SPIRType::Struct:
{
auto &m = meta.at(type.self);
auto &m = ir.meta.at(type.self);
if (m.members.size() == 0)
break;
if (!type.array.empty())
@ -3920,14 +3946,14 @@ string CompilerMSL::entry_point_args(bool append_comma)
}
// Builtin variables
for (auto &id : ids)
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
uint32_t var_id = var.self;
BuiltIn bi_type = meta[var_id].decoration.builtin_type;
BuiltIn bi_type = ir.meta[var_id].decoration.builtin_type;
// Don't emit SamplePosition as a separate parameter. In the entry
// point, we get that by calling get_sample_position() on the sample ID.
@ -3943,7 +3969,7 @@ string CompilerMSL::entry_point_args(bool append_comma)
}
else
{
auto &entry_func = get<SPIRFunction>(entry_point);
auto &entry_func = get<SPIRFunction>(ir.default_entry_point);
entry_func.fixup_hooks_in.push_back([=]() {
statement(builtin_type_decl(bi_type), " ", to_expression(var_id), " = get_sample_position(",
to_expression(builtin_sample_id_id), ");");
@ -3970,7 +3996,7 @@ string CompilerMSL::entry_point_args(bool append_comma)
uint32_t CompilerMSL::get_metal_resource_index(SPIRVariable &var, SPIRType::BaseType basetype)
{
auto &execution = get_entry_point();
auto &var_dec = meta[var.self].decoration;
auto &var_dec = ir.meta[var.self].decoration;
uint32_t var_desc_set = (var.storage == StorageClassPushConstant) ? kPushConstDescSet : var_dec.set;
uint32_t var_binding = (var.storage == StorageClassPushConstant) ? kPushConstBinding : var_dec.binding;
@ -4100,9 +4126,9 @@ string CompilerMSL::argument_decl(const SPIRFunction::Parameter &arg)
// has a qualified name, use it, otherwise use the standard name.
string CompilerMSL::to_name(uint32_t id, bool allow_alias) const
{
if (current_function && (current_function->self == entry_point))
if (current_function && (current_function->self == ir.default_entry_point))
{
string qual_name = meta.at(id).decoration.qualified_alias;
string qual_name = ir.meta.at(id).decoration.qualified_alias;
if (!qual_name.empty())
return qual_name;
}
@ -4145,13 +4171,13 @@ void CompilerMSL::replace_illegal_names()
"saturate",
};
for (auto &id : ids)
for (auto &id : ir.ids)
{
switch (id.get_type())
{
case TypeVariable:
{
auto &dec = meta[id.get_id()].decoration;
auto &dec = ir.meta[id.get_id()].decoration;
if (keywords.find(dec.alias) != end(keywords))
dec.alias += "0";
@ -4160,7 +4186,7 @@ void CompilerMSL::replace_illegal_names()
case TypeFunction:
{
auto &dec = meta[id.get_id()].decoration;
auto &dec = ir.meta[id.get_id()].decoration;
if (illegal_func_names.find(dec.alias) != end(illegal_func_names))
dec.alias += "0";
@ -4169,7 +4195,7 @@ void CompilerMSL::replace_illegal_names()
case TypeType:
{
for (auto &mbr_dec : meta[id.get_id()].members)
for (auto &mbr_dec : ir.meta[id.get_id()].members)
if (keywords.find(mbr_dec.alias) != end(keywords))
mbr_dec.alias += "0";
@ -4181,7 +4207,7 @@ void CompilerMSL::replace_illegal_names()
}
}
for (auto &entry : entry_points)
for (auto &entry : ir.entry_points)
{
// Change both the entry point name and the alias, to keep them synced.
string &ep_name = entry.second.name;
@ -4189,7 +4215,7 @@ void CompilerMSL::replace_illegal_names()
ep_name += "0";
// Always write this because entry point might have been renamed earlier.
meta[entry.first].decoration.alias = ep_name;
ir.meta[entry.first].decoration.alias = ep_name;
}
CompilerGLSL::replace_illegal_names();
@ -4528,7 +4554,7 @@ string CompilerMSL::builtin_to_glsl(BuiltIn builtin, StorageClass storage)
case BuiltInLayer:
case BuiltInFragDepth:
case BuiltInSampleMask:
if (storage != StorageClassInput && current_function && (current_function->self == entry_point))
if (storage != StorageClassInput && current_function && (current_function->self == ir.default_entry_point))
return stage_out_var_name + "." + CompilerGLSL::builtin_to_glsl(builtin, storage);
break;
@ -4787,7 +4813,7 @@ void CompilerMSL::analyze_sampled_image_usage()
if (msl_options.swizzle_texture_samples)
{
SampledImageScanner scanner(*this);
traverse_all_reachable_opcodes(get<SPIRFunction>(entry_point), scanner);
traverse_all_reachable_opcodes(get<SPIRFunction>(ir.default_entry_point), scanner);
}
}
@ -4935,7 +4961,7 @@ CompilerMSL::SPVFuncImpl CompilerMSL::OpCodePreprocessor::get_spv_func_impl(Op o
uint32_t id_rhs = args[1];
const SPIRType *type = nullptr;
if (compiler.ids[id_rhs].get_type() != TypeNone)
if (compiler.ir.ids[id_rhs].get_type() != TypeNone)
{
// Could be a constant, or similar.
type = &compiler.expression_type(id_rhs);
@ -5187,7 +5213,8 @@ std::string CompilerMSL::to_initializer_expression(const SPIRVariable &var)
// FIXME: We cannot handle non-constant arrays being initialized.
// We will need to inject spvArrayCopy here somehow ...
auto &type = get<SPIRType>(var.basetype);
if (ids[var.initializer].get_type() == TypeConstant && (!type.array.empty() || type.basetype == SPIRType::Struct))
if (ir.ids[var.initializer].get_type() == TypeConstant &&
(!type.array.empty() || type.basetype == SPIRType::Struct))
return constant_expression(get<SPIRConstant>(var.initializer));
else
return CompilerGLSL::to_initializer_expression(var);

View File

@ -271,6 +271,13 @@ public:
CompilerMSL(const uint32_t *ir, size_t word_count, MSLVertexAttr *p_vtx_attrs = nullptr, size_t vtx_attrs_count = 0,
MSLResourceBinding *p_res_bindings = nullptr, size_t res_bindings_count = 0);
// Alternate constructors taking pre-parsed IR directly.
CompilerMSL(const ParsedIR &ir, MSLVertexAttr *p_vtx_attrs = nullptr, size_t vtx_attrs_count = 0,
MSLResourceBinding *p_res_bindings = nullptr, size_t res_bindings_count = 0);
CompilerMSL(ParsedIR &&ir, MSLVertexAttr *p_vtx_attrs = nullptr, size_t vtx_attrs_count = 0,
MSLResourceBinding *p_res_bindings = nullptr, size_t res_bindings_count = 0);
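
As a hedged sketch of the multiple-compilation case these overloads allow: copy the ParsedIR into every compiler except the last, which may take it by move. The backends chosen and the variable names here are illustrative only and not part of this diff.

// Sketch: one parse, several backends. Copy for the first compiler, move into the last.
spirv_cross::Parser parser(std::move(spirv_words));
parser.parse();

spirv_cross::CompilerMSL msl(parser.get_parsed_ir());              // copies the ParsedIR
std::string msl_source = msl.compile();

spirv_cross::CompilerHLSL hlsl(std::move(parser.get_parsed_ir())); // moves it for the final consumer
std::string hlsl_source = hlsl.compile();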
// Compiles the SPIR-V code into Metal Shading Language.
std::string compile() override;

spirv_parser.cpp (new file, 1025 lines)

File diff suppressed because it is too large.

spirv_parser.hpp (new file, 94 lines)
View File

@ -0,0 +1,94 @@
/*
* Copyright 2018 Arm Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SPIRV_CROSS_PARSER_HPP
#define SPIRV_CROSS_PARSER_HPP
#include "spirv_cross_parsed_ir.hpp"
#include <stdint.h>
#include <vector>
namespace spirv_cross
{
class Parser
{
public:
Parser(const uint32_t *spirv_data, size_t word_count);
Parser(std::vector<uint32_t> spirv);
void parse();
ParsedIR &get_parsed_ir()
{
return ir;
}
private:
ParsedIR ir;
SPIRFunction *current_function = nullptr;
SPIRBlock *current_block = nullptr;
void parse(const Instruction &instr);
const uint32_t *stream(const Instruction &instr) const;
template <typename T, typename... P>
T &set(uint32_t id, P &&... args)
{
auto &var = variant_set<T>(ir.ids.at(id), std::forward<P>(args)...);
var.self = id;
return var;
}
template <typename T>
T &get(uint32_t id)
{
return variant_get<T>(ir.ids.at(id));
}
template <typename T>
T *maybe_get(uint32_t id)
{
if (ir.ids.at(id).get_type() == T::type)
return &get<T>(id);
else
return nullptr;
}
template <typename T>
const T &get(uint32_t id) const
{
return variant_get<T>(ir.ids.at(id));
}
template <typename T>
const T *maybe_get(uint32_t id) const
{
if (ir.ids.at(id).get_type() == T::type)
return &get<T>(id);
else
return nullptr;
}
// This must be an ordered data structure so we always pick the same type aliases.
std::vector<uint32_t> global_struct_cache;
bool types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const;
bool variable_storage_is_aliased(const SPIRVariable &v) const;
void make_constant_null(uint32_t id, uint32_t type);
};
} // namespace spirv_cross
#endif
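
For reference, a minimal usage sketch of the single-compilation path enabled by this Parser API: parse once, then move the resulting ParsedIR into a compiler. The helper function and variable names below are illustrative only and not part of this diff.

#include "spirv_glsl.hpp"
#include "spirv_parser.hpp"

#include <utility>
#include <vector>

// Sketch: build a ParsedIR once, then hand it off by move for a single compile.
std::string compile_to_glsl(std::vector<uint32_t> spirv_words)
{
	spirv_cross::Parser parser(std::move(spirv_words));
	parser.parse(); // populates the ParsedIR owned by the parser

	// Moving avoids copying the IR when only one backend consumes it.
	spirv_cross::CompilerGLSL compiler(std::move(parser.get_parsed_ir()));
	return compiler.compile();
}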

View File

@ -264,7 +264,7 @@ string CompilerReflection::compile()
void CompilerReflection::emit_types()
{
bool emitted_open_tag = false;
for (auto &id : ids)
for (auto &id : ir.ids)
{
auto idType = id.get_type();
if (idType == TypeType)
@ -360,7 +360,7 @@ void CompilerReflection::emit_type_member_qualifiers(const SPIRType &type, uint3
auto &membertype = get<SPIRType>(type.member_types[index]);
emit_type_array(membertype);
auto &memb = meta[type.self].members;
auto &memb = ir.meta[type.self].members;
if (index < memb.size())
{
auto &dec = memb[index];
@ -437,7 +437,7 @@ void CompilerReflection::emit_resources(const char *tag, const vector<Resource>
for (auto &res : resources)
{
auto &type = get_type(res.type_id);
auto typeflags = meta[type.self].decoration.decoration_flags;
auto typeflags = ir.meta[type.self].decoration.decoration_flags;
auto &mask = get_decoration_bitset(res.id);
// If we don't have a name, use the fallback for the type instead of the variable
@ -565,7 +565,7 @@ void CompilerReflection::emit_specialization_constants()
string CompilerReflection::to_member_name(const SPIRType &type, uint32_t index) const
{
auto &memb = meta[type.self].members;
auto &memb = ir.meta[type.self].members;
if (index < memb.size() && !memb[index].alias.empty())
return memb[index].alias;
else

View File

@ -33,14 +33,26 @@ class CompilerReflection : public CompilerGLSL
using Parent = CompilerGLSL;
public:
CompilerReflection(std::vector<uint32_t> spirv_)
explicit CompilerReflection(std::vector<uint32_t> spirv_)
: Parent(move(spirv_))
{
options.vulkan_semantics = true;
}
CompilerReflection(const uint32_t *ir, size_t word_count)
: Parent(ir, word_count)
CompilerReflection(const uint32_t *ir_, size_t word_count)
: Parent(ir_, word_count)
{
options.vulkan_semantics = true;
}
explicit CompilerReflection(const ParsedIR &ir_)
: CompilerGLSL(ir_)
{
options.vulkan_semantics = true;
}
explicit CompilerReflection(ParsedIR &&ir_)
: CompilerGLSL(std::move(ir_))
{
options.vulkan_semantics = true;
}