Add support for multiple entry points.

- Only consider I/O variables that are part of the OpEntryPoint interface.
- Keep a safe fallback when there is only one entry point to avoid potentially
  breaking previously working shaders.
Hans-Kristian Arntzen 2016-07-28 11:16:02 +02:00
parent 121f69927a
commit 042475e88e
10 changed files with 337 additions and 49 deletions
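Usage sketch (editorial, not part of the commit): the C++ below shows how the new entry-point API might be driven. get_entry_points(), set_entry_point() and compile() are taken from the diff that follows; load_spirv(), the "shader.spv" path and the "main2" entry name are illustrative placeholders ("main2" matches the test shader added in this commit, and main.cpp uses its own read_spirv_file() helper rather than load_spirv()).

#include "spirv_glsl.hpp"

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

// Minimal SPIR-V loader standing in for main.cpp's read_spirv_file() helper
// (error handling omitted for brevity).
static std::vector<uint32_t> load_spirv(const char *path)
{
    FILE *file = fopen(path, "rb");
    fseek(file, 0, SEEK_END);
    long len = ftell(file);
    fseek(file, 0, SEEK_SET);
    std::vector<uint32_t> words(len / sizeof(uint32_t));
    fread(words.data(), 1, len, file);
    fclose(file);
    return words;
}

int main()
{
    // "shader.spv" is a placeholder path.
    spirv_cross::CompilerGLSL compiler(load_spirv("shader.spv"));

    // Enumerate every OpEntryPoint in the module.
    for (auto &name : compiler.get_entry_points())
        fprintf(stderr, "Entry point: %s\n", name.c_str());

    // Pick the entry point to compile; reflection and codegen now only
    // consider I/O variables listed in that entry point's interface.
    compiler.set_entry_point("main2");
    std::string glsl = compiler.compile();
    fprintf(stderr, "%s\n", glsl.c_str());
}

The same selection is exposed on the command line through the new --entry flag added to main.cpp below.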

View File

@@ -218,10 +218,37 @@ static void print_resources(const Compiler &compiler, const char *tag, const vec
fprintf(stderr, "=============\n\n");
}
static const char *execution_model_to_str(spv::ExecutionModel model)
{
switch (model)
{
case spv::ExecutionModelVertex:
return "vertex";
case spv::ExecutionModelTessellationControl:
return "tessellation control";
case ExecutionModelTessellationEvaluation:
return "tessellation evaluation";
case ExecutionModelGeometry:
return "geometry";
case ExecutionModelFragment:
return "fragment";
case ExecutionModelGLCompute:
return "compute";
default:
return "???";
}
}
static void print_resources(const Compiler &compiler, const ShaderResources &res)
{
uint64_t modes = compiler.get_execution_mode_mask();
fprintf(stderr, "Entry points:\n");
auto entry_points = compiler.get_entry_points();
for (auto &e : entry_points)
fprintf(stderr, " %s (%s)\n", e.c_str(), execution_model_to_str(compiler.get_entry_point(e).model));
fprintf(stderr, "\n");
fprintf(stderr, "Execution modes:\n");
for (unsigned i = 0; i < 64; i++)
{
@@ -348,6 +375,7 @@ struct CLIArguments
vector<PLSArg> pls_out;
vector<Remap> remaps;
vector<string> extensions;
string entry;
uint32_t iterations = 1;
bool cpp = false;
@@ -361,7 +389,7 @@ static void print_help()
"version>] [--dump-resources] [--help] [--force-temporary] [--cpp] [--cpp-interface-name <name>] "
"[--metal] [--vulkan-semantics] [--flatten-ubo] [--fixup-clipspace] [--iterations iter] [--pls-in "
"format input-name] [--pls-out format output-name] [--remap source_name target_name components] "
"[--extension ext]\n");
"[--extension ext] [--entry name]\n");
}
static bool remap_generic(Compiler &compiler, const vector<Resource> &resources, const Remap &remap)
@@ -480,6 +508,7 @@ int main(int argc, char *argv[])
cbs.add("--metal", [&args](CLIParser &) { args.metal = true; });
cbs.add("--vulkan-semantics", [&args](CLIParser &) { args.vulkan_semantics = true; });
cbs.add("--extension", [&args](CLIParser &parser) { args.extensions.push_back(parser.next_string()); });
cbs.add("--entry", [&args](CLIParser &parser) { args.entry = parser.next_string(); });
cbs.add("--remap", [&args](CLIParser &parser) {
string src = parser.next_string();
string dst = parser.next_string();
@@ -531,6 +560,9 @@ int main(int argc, char *argv[])
else
compiler = unique_ptr<CompilerGLSL>(new CompilerGLSL(read_spirv_file(args.input)));
if (!args.entry.empty())
compiler->set_entry_point(args.entry);
if (!args.set_version && !compiler->get_options().version)
{
fprintf(stderr, "Didn't specify GLSL version and SPIR-V did not specify language.\n");

View File

@@ -0,0 +1,27 @@
#version 310 es
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
layout(binding = 0, std430) restrict buffer _6
{
ivec4 _0;
uvec4 _1;
} _8;
layout(binding = 1, std430) restrict buffer _7
{
uvec4 _0;
ivec4 _1;
} _9;
void main()
{
_9._0 = (_8._1 + uvec4(_8._0));
_9._0 = (uvec4(_8._0) + _8._1);
_9._0 = (_8._1 + _8._1);
_9._0 = uvec4(_8._0 + _8._0);
_9._1 = ivec4(_8._1 + _8._1);
_9._1 = (_8._0 + _8._0);
_9._1 = (ivec4(_8._1) + _8._0);
_9._1 = (_8._0 + ivec4(_8._1));
}

View File

@@ -0,0 +1,97 @@
; SPIR-V
; Version: 1.0
; Generator: Khronos Glslang Reference Front End; 1
; Bound: 30
; Schema: 0
OpCapability Shader
%1 = OpExtInstImport "GLSL.std.450"
OpMemoryModel Logical GLSL450
OpEntryPoint Fragment %func_alt "main2" %frag_in %frag_out
OpEntryPoint GLCompute %func "main"
OpExecutionMode %func LocalSize 1 1 1
OpSource ESSL 310
OpSourceExtension "GL_GOOGLE_cpp_style_line_directive"
OpSourceExtension "GL_GOOGLE_include_directive"
OpMemberDecorate %input_struct 0 Offset 0
OpMemberDecorate %input_struct 1 Offset 16
OpMemberDecorate %output_struct 0 Offset 0
OpMemberDecorate %output_struct 1 Offset 16
OpDecorate %input_struct BufferBlock
OpDecorate %inputs DescriptorSet 0
OpDecorate %inputs Binding 0
OpDecorate %inputs Restrict
OpDecorate %output_struct BufferBlock
OpDecorate %outputs DescriptorSet 0
OpDecorate %outputs Binding 1
OpDecorate %outputs Restrict
OpDecorate %frag_in Location 0
OpDecorate %frag_out Location 0
%void = OpTypeVoid
%main_func = OpTypeFunction %void
%uint = OpTypeInt 32 0
%uvec4 = OpTypeVector %uint 4
%int = OpTypeInt 32 1
%ivec4 = OpTypeVector %int 4
%ivec4_ptr = OpTypePointer Uniform %ivec4
%uvec4_ptr = OpTypePointer Uniform %uvec4
%float = OpTypeFloat 32
%vec4 = OpTypeVector %float 4
%vec4_input_ptr = OpTypePointer Input %vec4
%vec4_output_ptr = OpTypePointer Output %vec4
%zero = OpConstant %int 0
%one = OpConstant %int 1
%input_struct = OpTypeStruct %ivec4 %uvec4
%input_struct_ptr = OpTypePointer Uniform %input_struct
%inputs = OpVariable %input_struct_ptr Uniform
%output_struct = OpTypeStruct %uvec4 %ivec4
%output_struct_ptr = OpTypePointer Uniform %output_struct
%outputs = OpVariable %output_struct_ptr Uniform
%frag_in = OpVariable %vec4_input_ptr Input
%frag_out = OpVariable %vec4_output_ptr Output
%func = OpFunction %void None %main_func
%block = OpLabel
%input1_ptr = OpAccessChain %ivec4_ptr %inputs %zero
%input0_ptr = OpAccessChain %uvec4_ptr %inputs %one
%input1 = OpLoad %ivec4 %input1_ptr
%input0 = OpLoad %uvec4 %input0_ptr
%output_ptr_uvec4 = OpAccessChain %uvec4_ptr %outputs %zero
%output_ptr_ivec4 = OpAccessChain %ivec4_ptr %outputs %one
; Test all variants of IAdd
%result_iadd_0 = OpIAdd %uvec4 %input0 %input1
%result_iadd_1 = OpIAdd %uvec4 %input1 %input0
%result_iadd_2 = OpIAdd %uvec4 %input0 %input0
%result_iadd_3 = OpIAdd %uvec4 %input1 %input1
%result_iadd_4 = OpIAdd %ivec4 %input0 %input0
%result_iadd_5 = OpIAdd %ivec4 %input1 %input1
%result_iadd_6 = OpIAdd %ivec4 %input0 %input1
%result_iadd_7 = OpIAdd %ivec4 %input1 %input0
OpStore %output_ptr_uvec4 %result_iadd_0
OpStore %output_ptr_uvec4 %result_iadd_1
OpStore %output_ptr_uvec4 %result_iadd_2
OpStore %output_ptr_uvec4 %result_iadd_3
OpStore %output_ptr_ivec4 %result_iadd_4
OpStore %output_ptr_ivec4 %result_iadd_5
OpStore %output_ptr_ivec4 %result_iadd_6
OpStore %output_ptr_ivec4 %result_iadd_7
OpReturn
OpFunctionEnd
%func_alt = OpFunction %void None %main_func
%block_alt = OpLabel
%frag_input_value = OpLoad %vec4 %frag_in
OpStore %frag_out %frag_input_value
OpReturn
OpFunctionEnd

View File

@@ -232,6 +232,32 @@ struct SPIRExtension : IVariant
Extension ext;
};
// SPIREntryPoint is not a variant since its IDs are used to decorate OpFunction,
// so in order to avoid conflicts, we can't stick them in the ids array.
struct SPIREntryPoint
{
SPIREntryPoint(uint32_t self_, spv::ExecutionModel execution_model, std::string entry_name)
: self(self_)
, name(std::move(entry_name))
, model(execution_model)
{
}
SPIREntryPoint() = default;
uint32_t self = 0;
std::string name;
std::vector<uint32_t> interface_variables;
uint64_t flags = 0;
struct
{
uint32_t x = 0, y = 0, z = 0;
} workgroup_size;
uint32_t invocations = 0;
uint32_t output_vertices = 0;
spv::ExecutionModel model = {};
};
struct SPIRExpression : IVariant
{
enum

View File

@@ -193,7 +193,8 @@ void CompilerCPP::emit_resources()
auto &type = get<SPIRType>(var.basetype);
if (var.storage != StorageClassFunction && !is_builtin_variable(var) && !var.remapped_variable &&
type.pointer && (var.storage == StorageClassInput || var.storage == StorageClassOutput))
type.pointer && (var.storage == StorageClassInput || var.storage == StorageClassOutput) &&
interface_variable_exists_in_entry_point(var.self))
{
emit_interface_block(var);
}
@@ -244,7 +245,7 @@ void CompilerCPP::emit_resources()
statement("");
statement("Resources* __res;");
if (execution.model == ExecutionModelGLCompute)
if (get_entry_point().model == ExecutionModelGLCompute)
statement("ComputePrivateResources __priv_res;");
statement("");
@@ -299,7 +300,7 @@ string CompilerCPP::compile()
emit_header();
emit_resources();
emit_function(get<SPIRFunction>(execution.entry_point), 0);
emit_function(get<SPIRFunction>(entry_point), 0);
pass_count++;
} while (force_recompile);
@@ -362,7 +363,7 @@ void CompilerCPP::emit_function_prototype(SPIRFunction &func, uint64_t)
decl += type_to_glsl(type);
decl += " ";
if (func.self == execution.entry_point)
if (func.self == entry_point)
{
decl += "main";
processing_entry_point = true;
@@ -424,6 +425,8 @@ string CompilerCPP::variable_decl(const SPIRType &type, const string &name)
void CompilerCPP::emit_header()
{
auto &execution = get_entry_point();
statement("// This C++ shader is autogenerated by spirv-cross.");
statement("#include \"spirv_cross/internal_interface.hpp\"");
statement("#include \"spirv_cross/external_interface.h\"");

View File

@@ -419,7 +419,7 @@ ShaderResources Compiler::get_shader_resources() const
continue;
// Input
if (var.storage == StorageClassInput)
if (var.storage == StorageClassInput && interface_variable_exists_in_entry_point(var.self))
{
if (meta[type.self].decoration.decoration_flags & (1ull << DecorationBlock))
res.stage_inputs.push_back({ var.self, var.basetype, type.self, meta[type.self].decoration.alias });
@@ -432,7 +432,7 @@ ShaderResources Compiler::get_shader_resources() const
res.subpass_inputs.push_back({ var.self, var.basetype, type.self, meta[var.self].decoration.alias });
}
// Outputs
else if (var.storage == StorageClassOutput)
else if (var.storage == StorageClassOutput && interface_variable_exists_in_entry_point(var.self))
{
if (meta[type.self].decoration.decoration_flags & (1ull << DecorationBlock))
res.stage_outputs.push_back({ var.self, var.basetype, type.self, meta[type.self].decoration.alias });
@@ -929,20 +929,23 @@ void Compiler::parse(const Instruction &instruction)
case OpEntryPoint:
{
if (execution.entry_point)
throw CompilerError("More than one entry point not supported.");
auto itr = entry_points.emplace(ops[1], SPIREntryPoint(ops[1], static_cast<ExecutionModel>(ops[0]),
extract_string(spirv, instruction.offset + 2)));
auto &e = itr.first->second;
execution.model = static_cast<ExecutionModel>(ops[0]);
execution.entry_point = ops[1];
// Strings need nul-terminator and consume the whole word.
uint32_t strlen_words = (e.name.size() + 1 + 3) >> 2;
e.interface_variables.insert(end(e.interface_variables), ops + strlen_words + 2, ops + instruction.length);
// If we don't have an entry, make the first one our "default".
if (!entry_point)
entry_point = ops[1];
break;
}
case OpExecutionMode:
{
uint32_t entry = ops[0];
if (entry != execution.entry_point)
throw CompilerError("Cannot set execution mode to non-existing entry point.");
auto &execution = entry_points[ops[0]];
auto mode = static_cast<ExecutionMode>(ops[1]);
execution.flags |= 1ull << mode;
@@ -1921,7 +1924,7 @@ std::vector<BufferRange> Compiler::get_active_buffer_ranges(uint32_t id) const
{
std::vector<BufferRange> ranges;
BufferAccessHandler handler(*this, ranges, id);
traverse_all_reachable_opcodes(get<SPIRFunction>(execution.entry_point), handler);
traverse_all_reachable_opcodes(get<SPIRFunction>(entry_point), handler);
return ranges;
}
@@ -1974,11 +1977,13 @@ bool Compiler::types_are_logically_equivalent(const SPIRType &a, const SPIRType
uint64_t Compiler::get_execution_mode_mask() const
{
return execution.flags;
return get_entry_point().flags;
}
void Compiler::set_execution_mode(ExecutionMode mode, uint32_t arg0, uint32_t arg1, uint32_t arg2)
{
auto &execution = get_entry_point();
execution.flags |= 1ull << mode;
switch (mode)
{
@@ -2003,11 +2008,13 @@ void Compiler::set_execution_mode(ExecutionMode mode, uint32_t arg0, uint32_t ar
void Compiler::unset_execution_mode(ExecutionMode mode)
{
auto &execution = get_entry_point();
execution.flags &= ~(1ull << mode);
}
uint32_t Compiler::get_execution_mode_argument(spv::ExecutionMode mode, uint32_t index) const
{
auto &execution = get_entry_point();
switch (mode)
{
case ExecutionModeLocalSize:
@@ -2036,6 +2043,7 @@ uint32_t Compiler::get_execution_mode_argument(spv::ExecutionMode mode, uint32_t
ExecutionModel Compiler::get_execution_model() const
{
auto &execution = get_entry_point();
return execution.model;
}
@@ -2076,3 +2084,69 @@ void Compiler::inherit_expression_dependencies(uint32_t dst, uint32_t source_exp
// Eliminate duplicated dependencies.
e_deps.erase(unique(begin(e_deps), end(e_deps)), end(e_deps));
}
vector<string> Compiler::get_entry_points() const
{
vector<string> entries;
for (auto &entry : entry_points)
entries.push_back(entry.second.name);
return entries;
}
void Compiler::set_entry_point(const std::string &name)
{
auto &entry = get_entry_point(name);
entry_point = entry.self;
}
SPIREntryPoint &Compiler::get_entry_point(const std::string &name)
{
auto itr =
find_if(begin(entry_points), end(entry_points),
[&](const std::pair<uint32_t, SPIREntryPoint> &entry) -> bool { return entry.second.name == name; });
if (itr == end(entry_points))
throw CompilerError("Entry point does not exist.");
return itr->second;
}
const SPIREntryPoint &Compiler::get_entry_point(const std::string &name) const
{
auto itr =
find_if(begin(entry_points), end(entry_points),
[&](const std::pair<uint32_t, SPIREntryPoint> &entry) -> bool { return entry.second.name == name; });
if (itr == end(entry_points))
throw CompilerError("Entry point does not exist.");
return itr->second;
}
const SPIREntryPoint &Compiler::get_entry_point() const
{
return entry_points.find(entry_point)->second;
}
SPIREntryPoint &Compiler::get_entry_point()
{
return entry_points.find(entry_point)->second;
}
bool Compiler::interface_variable_exists_in_entry_point(uint32_t id) const
{
auto &var = get<SPIRVariable>(id);
if (var.storage != StorageClassInput && var.storage != StorageClassOutput)
throw CompilerError("Only Input and Output variables are part of a shader linking interface.");
// This is to avoid potential problems with very old glslang versions which did
// not emit input/output interfaces properly.
// We can assume they only had a single entry point, and single entry point
// shaders could easily be assumed to use every interface variable anyways.
if (entry_points.size() <= 1)
return true;
auto &execution = get_entry_point();
return find(begin(execution.interface_variables), end(execution.interface_variables), id) !=
end(execution.interface_variables);
}
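Reflection sketch (editorial, not part of the commit): with the filtering above, the stage I/O reported by get_shader_resources() now depends on the active entry point whenever a module declares more than one. Assuming a Compiler built from the two-entry-point test module added earlier in this commit:

#include "spirv_cross.hpp"

#include <cstdio>
#include <string>

// Stage I/O reflection is filtered by the active entry point's OpEntryPoint
// interface list (unless the module only has a single entry point).
static void report_stage_io(spirv_cross::Compiler &compiler, const std::string &entry)
{
    compiler.set_entry_point(entry);
    auto res = compiler.get_shader_resources();

    // For the test module in this commit, "main" (GLCompute) reports no stage
    // inputs/outputs, while "main2" (Fragment) reports one of each.
    fprintf(stderr, "%s: %u stage inputs, %u stage outputs\n", entry.c_str(),
            unsigned(res.stage_inputs.size()), unsigned(res.stage_outputs.size()));
}

Note that only the Input and Output storage classes are filtered this way in this commit; buffer and image resources are reported regardless of the selected entry point.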

View File

@@ -191,6 +191,18 @@ public:
void set_subpass_input_remapped_components(uint32_t id, uint32_t components);
uint32_t get_subpass_input_remapped_components(uint32_t id) const;
// All operations work on the current entry point.
// Entry points can be swapped out with set_entry_point().
// Entry points should be set right after the constructor completes as some reflection functions traverse the graph from the entry point.
// Resource reflection also depends on the entry point.
// By default, the current entry point is set to the first OpEntryPoint which appears in the SPIR-V module.
std::vector<std::string> get_entry_points() const;
void set_entry_point(const std::string &name);
// Returns the internal data structure for entry points to allow poking around.
const SPIREntryPoint &get_entry_point(const std::string &name) const;
SPIREntryPoint &get_entry_point(const std::string &name);
// Query and modify OpExecutionMode.
uint64_t get_execution_mode_mask() const;
void unset_execution_mode(spv::ExecutionMode mode);
@@ -266,20 +278,12 @@ protected:
return nullptr;
}
struct Execution
{
uint64_t flags = 0;
spv::ExecutionModel model;
uint32_t entry_point = 0;
struct
{
uint32_t x = 0, y = 0, z = 0;
} workgroup_size;
uint32_t invocations = 0;
uint32_t output_vertices = 0;
Execution() = default;
} execution;
uint32_t entry_point = 0;
// Normally, we'd stick SPIREntryPoint in ids array, but it conflicts with SPIRFunction.
// Entry points can therefore be seen as some sort of meta structure.
std::unordered_map<uint32_t, SPIREntryPoint> entry_points;
const SPIREntryPoint &get_entry_point() const;
SPIREntryPoint &get_entry_point();
struct Source
{
@@ -359,6 +363,10 @@ protected:
bool types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const;
void inherit_expression_dependencies(uint32_t dst, uint32_t source);
// For proper multiple entry point support, allow querying if an Input or Output
// variable is part of that entry point's interface.
bool interface_variable_exists_in_entry_point(uint32_t id) const;
private:
void parse();
void parse(const Instruction &i);
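A small editorial sketch of the "poking around" accessor declared in this header: per-entry-point state can be read straight off the SPIREntryPoint struct added earlier in this commit. Which fields hold meaningful values depends on the module's execution modes (workgroup_size, for instance, is only populated for compute-style entry points).

#include "spirv_cross.hpp"

#include <cstdio>
#include <string>

// Read per-entry-point metadata through the new reflection accessor.
static void dump_entry_point(const spirv_cross::Compiler &compiler, const std::string &name)
{
    const spirv_cross::SPIREntryPoint &ep = compiler.get_entry_point(name);
    fprintf(stderr, "%s: execution model %d, local size %ux%ux%u\n", name.c_str(),
            int(ep.model), unsigned(ep.workgroup_size.x), unsigned(ep.workgroup_size.y),
            unsigned(ep.workgroup_size.z));
}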

View File

@@ -221,7 +221,7 @@ string CompilerGLSL::compile()
emit_header();
emit_resources();
emit_function(get<SPIRFunction>(execution.entry_point), 0);
emit_function(get<SPIRFunction>(entry_point), 0);
pass_count++;
} while (force_recompile);
@@ -231,6 +231,7 @@ string CompilerGLSL::compile()
void CompilerGLSL::emit_header()
{
auto &execution = get_entry_point();
statement("#version ", options.version, options.es && options.version > 100 ? " es" : "");
for (auto &header : header_lines)
@@ -948,6 +949,7 @@ void CompilerGLSL::emit_buffer_block(const SPIRVariable &var)
void CompilerGLSL::emit_interface_block(const SPIRVariable &var)
{
auto &execution = get_entry_point();
auto &type = get<SPIRType>(var.basetype);
// Either make it plain in/out or in/out blocks depending on what shader is doing ...
@@ -1064,6 +1066,7 @@ string CompilerGLSL::remap_swizzle(uint32_t result_type, uint32_t input_componen
void CompilerGLSL::emit_pls()
{
auto &execution = get_entry_point();
if (execution.model != ExecutionModelFragment)
throw CompilerError("Pixel local storage only supported in fragment shaders.");
@@ -1096,6 +1099,8 @@ void CompilerGLSL::emit_pls()
void CompilerGLSL::emit_resources()
{
auto &execution = get_entry_point();
// Legacy GL uses gl_FragData[], redeclare all fragment outputs
// with builtins.
if (execution.model == ExecutionModelFragment && is_legacy())
@@ -1183,7 +1188,8 @@ void CompilerGLSL::emit_resources()
auto &type = get<SPIRType>(var.basetype);
if (var.storage != StorageClassFunction && !is_builtin_variable(var) && !var.remapped_variable &&
type.pointer && (var.storage == StorageClassInput || var.storage == StorageClassOutput))
type.pointer && (var.storage == StorageClassInput || var.storage == StorageClassOutput) &&
interface_variable_exists_in_entry_point(var.self))
{
emit_interface_block(var);
emitted = true;
@@ -4006,7 +4012,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
case OpControlBarrier:
{
// Ignore execution and memory scope.
if (execution.model == ExecutionModelGLCompute)
if (get_entry_point().model == ExecutionModelGLCompute)
{
uint32_t mem = get<SPIRConstant>(ops[2]).scalar();
if (mem == MemorySemanticsWorkgroupMemoryMask)
@@ -4101,6 +4107,8 @@ const char *CompilerGLSL::flags_to_precision_qualifiers_glsl(const SPIRType &typ
{
if (options.es)
{
auto &execution = get_entry_point();
// Structs do not have precision qualifiers, neither do doubles (desktop only anyways, so no mediump/highp).
if (type.basetype != SPIRType::Float && type.basetype != SPIRType::Int && type.basetype != SPIRType::UInt &&
type.basetype != SPIRType::Image && type.basetype != SPIRType::SampledImage &&
@@ -4525,7 +4533,7 @@ void CompilerGLSL::emit_function_prototype(SPIRFunction &func, uint64_t return_f
decl += type_to_glsl(type);
decl += " ";
if (func.self == execution.entry_point)
if (func.self == entry_point)
{
decl += "main";
processing_entry_point = true;
@@ -4625,6 +4633,7 @@ void CompilerGLSL::emit_function(SPIRFunction &func, uint64_t return_flags)
void CompilerGLSL::emit_fixup()
{
auto &execution = get_entry_point();
if (execution.model == ExecutionModelVertex && options.vertex.fixup_clipspace)
{
const char *suffix = backend.float_literal_suffix ? "f" : "";

View File

@@ -84,7 +84,7 @@ string CompilerMSL::compile(MSLConfiguration &msl_cfg, vector<MSLVertexAttr> *p_
emit_header();
emit_resources();
emit_function_declarations();
emit_function(get<SPIRFunction>(execution.entry_point), 0);
emit_function(get<SPIRFunction>(entry_point), 0);
pass_count++;
} while (force_recompile);
@@ -120,7 +120,7 @@ void CompilerMSL::extract_builtins()
// Non-constant variables cannot have global scope in Metal.
void CompilerMSL::localize_global_variables()
{
auto &entry_func = get<SPIRFunction>(execution.entry_point);
auto &entry_func = get<SPIRFunction>(entry_point);
auto iter = global_variables.begin();
while (iter != global_variables.end())
{
@@ -141,6 +141,8 @@ void CompilerMSL::localize_global_variables()
// Adds any interface structure variables needed by this shader
void CompilerMSL::add_interface_structs()
{
auto &execution = get_entry_point();
stage_in_var_ids.clear();
qual_pos_var_name = "";
@@ -170,6 +172,8 @@ void CompilerMSL::bind_vertex_attributes(std::set<uint32_t> &bindings)
// from the binding info provided during compiler construction, matching by location.
void CompilerMSL::bind_vertex_attributes(std::set<uint32_t> &bindings)
{
auto &execution = get_entry_point();
if (execution.model == ExecutionModelVertex)
{
for (auto &id : ids)
@@ -179,8 +183,8 @@ void CompilerMSL::bind_vertex_attributes(std::set<uint32_t> &bindings)
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
if (var.storage == StorageClassInput && (!is_builtin_variable(var)) && !var.remapped_variable &&
type.pointer)
if (var.storage == StorageClassInput && interface_variable_exists_in_entry_point(var.self) &&
(!is_builtin_variable(var)) && !var.remapped_variable && type.pointer)
{
auto &dec = meta[var.self].decoration;
MSLVertexAttr *p_va = vtx_attrs_by_location[dec.location];
@@ -207,6 +211,7 @@ void CompilerMSL::bind_vertex_attributes(std::set<uint32_t> &bindings)
// Returns the ID of the newly added variable, or zero if no variable was added.
uint32_t CompilerMSL::add_interface_struct(StorageClass storage, uint32_t vtx_binding)
{
auto &execution = get_entry_point();
bool incl_builtins = (storage == StorageClassOutput);
bool match_binding = (execution.model == ExecutionModelVertex) && (storage == StorageClassInput);
@@ -220,8 +225,9 @@ uint32_t CompilerMSL::add_interface_struct(StorageClass storage, uint32_t vtx_bi
auto &type = get<SPIRType>(var.basetype);
auto &dec = meta[var.self].decoration;
if (var.storage == storage && (!is_builtin_variable(var) || incl_builtins) &&
(!match_binding || (vtx_binding == dec.binding)) && !var.remapped_variable && type.pointer)
if (var.storage == storage && interface_variable_exists_in_entry_point(var.self) &&
(!is_builtin_variable(var) || incl_builtins) && (!match_binding || (vtx_binding == dec.binding)) &&
!var.remapped_variable && type.pointer)
{
vars.push_back(&var);
}
@@ -264,7 +270,7 @@ uint32_t CompilerMSL::add_interface_struct(StorageClass storage, uint32_t vtx_bi
// Add the output interface struct as a local variable to the entry function,
// and force the entry function to return the output interface struct from
// any blocks that perform a function return.
auto &entry_func = get<SPIRFunction>(execution.entry_point);
auto &entry_func = get<SPIRFunction>(entry_point);
entry_func.add_local_variable(ib_var_id);
for (auto &blk_id : entry_func.blocks)
{
@@ -456,7 +462,7 @@ void CompilerMSL::emit_function_declarations()
if (id.get_type() == TypeFunction)
{
auto &func = id.get<SPIRFunction>();
if (func.self != execution.entry_point)
if (func.self != entry_point)
emit_function_prototype(func, true);
}
@@ -475,7 +481,7 @@ void CompilerMSL::emit_function_prototype(SPIRFunction &func, bool is_decl)
local_variable_names = resource_names;
string decl;
processing_entry_point = (func.self == execution.entry_point);
processing_entry_point = (func.self == entry_point);
auto &type = get<SPIRType>(func.return_type);
decl += func_type_decl(type);
@@ -843,6 +849,8 @@ string CompilerMSL::to_sampler_expression(uint32_t id)
// Called automatically at the end of the entry point function
void CompilerMSL::emit_fixup()
{
auto &execution = get_entry_point();
if ((execution.model == ExecutionModelVertex) && stage_out_var_id && !qual_pos_var_name.empty())
{
if (options.vertex.fixup_clipspace)
@@ -868,6 +876,7 @@ string CompilerMSL::member_decl(const SPIRType &type, const SPIRType &membertype
// Return a MSL qualifier for the specified function attribute member
string CompilerMSL::member_attribute_qualifier(const SPIRType &type, uint32_t index)
{
auto &execution = get_entry_point();
BuiltIn builtin;
bool is_builtin = is_member_builtin(type, index, &builtin);
@@ -1008,7 +1017,7 @@ string CompilerMSL::constant_expression(const SPIRConstant &c)
// entry type if the current function is the entry point function
string CompilerMSL::func_type_decl(SPIRType &type)
{
auto &execution = get_entry_point();
// The regular function return type. If not processing the entry point function, that's all we need
string return_type = type_to_glsl(type);
if (!processing_entry_point)
@@ -1056,6 +1065,7 @@ string CompilerMSL::clean_func_name(string func_name)
// Returns a string containing a comma-delimited list of args for the entry point function
string CompilerMSL::entry_point_args(bool append_comma)
{
auto &execution = get_entry_point();
string ep_args;
// Stage-in structures
@@ -1145,7 +1155,7 @@ string CompilerMSL::entry_point_args(bool append_comma)
// Returns the Metal index of the resource of the specified type as used by the specified variable.
uint32_t CompilerMSL::get_metal_resource_index(SPIRVariable &var, SPIRType::BaseType basetype)
{
auto &execution = get_entry_point();
auto &var_dec = meta[var.self].decoration;
uint32_t var_desc_set = (var.storage == StorageClassPushConstant) ? kPushConstDescSet : var_dec.set;
uint32_t var_binding = (var.storage == StorageClassPushConstant) ? kPushConstBinding : var_dec.binding;
@@ -1189,7 +1199,7 @@ uint32_t CompilerMSL::get_metal_resource_index(SPIRVariable &var, SPIRType::Base
// Returns the name of the entry point of this shader
string CompilerMSL::get_entry_point_name()
{
return clean_func_name(to_name(execution.entry_point));
return clean_func_name(to_name(entry_point));
}
// Returns the name of either the vertex index or instance index builtin
@@ -1446,6 +1456,8 @@ string CompilerMSL::builtin_to_glsl(BuiltIn builtin)
// Returns an MSL string attribute qualifier for a SPIR-V builtin
string CompilerMSL::builtin_qualifier(BuiltIn builtin)
{
auto &execution = get_entry_point();
switch (builtin)
{
// Vertex function in

View File

@@ -86,14 +86,14 @@ def cross_compile(shader, vulkan, spirv):
# subprocess.check_call(['spirv-val', spirv_path])
spirv_cross_path = './spirv-cross'
subprocess.check_call([spirv_cross_path, '--output', glsl_path, spirv_path])
subprocess.check_call([spirv_cross_path, '--entry', 'main', '--output', glsl_path, spirv_path])
# A shader might not be possible to make valid GLSL from, skip validation for this case.
if (not ('nocompat' in glsl_path)) and (not spirv):
validate_shader(glsl_path, False)
if vulkan or spirv:
subprocess.check_call([spirv_cross_path, '--vulkan-semantics', '--output', vulkan_glsl_path, spirv_path])
subprocess.check_call([spirv_cross_path, '--entry', 'main', '--vulkan-semantics', '--output', vulkan_glsl_path, spirv_path])
validate_shader(vulkan_glsl_path, vulkan)
return (spirv_path, glsl_path, vulkan_glsl_path if vulkan else None)