/*
 * Copyright 2018-2021 Arm Limited
 * SPDX-License-Identifier: Apache-2.0 OR MIT
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * At your option, you may choose to accept this material under either:
 *  1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or
 *  2. The MIT License, found at <http://opensource.org/licenses/MIT>.
 */

#include "spirv_parser.hpp"
#include <assert.h>

using namespace std;
using namespace spv;

namespace SPIRV_CROSS_NAMESPACE
{
Parser::Parser(vector<uint32_t> spirv)
{
    ir.spirv = std::move(spirv);
}

Parser::Parser(const uint32_t *spirv_data, size_t word_count)
{
    ir.spirv = vector<uint32_t>(spirv_data, spirv_data + word_count);
}
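
// Typical usage (a sketch, not part of the parser itself): construct with a
// SPIR-V word stream, call parse(), then move the ParsedIR into a backend.
//     Parser parser(std::move(spirv_words));   // spirv_words: vector<uint32_t>
//     parser.parse();
//     CompilerGLSL glsl(std::move(parser.get_parsed_ir()));
// CompilerGLSL is one example; any compiler backend accepting ParsedIR works.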

static bool decoration_is_string(Decoration decoration)
{
    switch (decoration)
    {
    case DecorationHlslSemanticGOOGLE:
        return true;

    default:
        return false;
    }
}
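
// SPIR-V words are written in the producer's byte order. If the magic number
// reads back byte-swapped (0x07230203 appearing as 0x03022307), every word in
// the module must be byte-reversed before parsing.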
static inline uint32_t swap_endian(uint32_t v)
{
    return ((v >> 24) & 0x000000ffu) | ((v >> 8) & 0x0000ff00u) | ((v << 8) & 0x00ff0000u) | ((v << 24) & 0xff000000u);
}
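
// The version word packs the major version into byte 2 and the minor version
// into byte 1, so SPIR-V 1.3 is encoded as 0x00010300.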
static bool is_valid_spirv_version(uint32_t version)
{
    switch (version)
    {
    // Allow v99 since it tends to just work.
    case 99:
    case 0x10000: // SPIR-V 1.0
    case 0x10100: // SPIR-V 1.1
    case 0x10200: // SPIR-V 1.2
    case 0x10300: // SPIR-V 1.3
    case 0x10400: // SPIR-V 1.4
    case 0x10500: // SPIR-V 1.5
    case 0x10600: // SPIR-V 1.6
        return true;

    default:
        return false;
    }
}

void Parser::parse()
{
    auto &spirv = ir.spirv;

    auto len = spirv.size();
    if (len < 5)
        SPIRV_CROSS_THROW("SPIRV file too small.");

    auto s = spirv.data();

    // Endian-swap if we need to.
    if (s[0] == swap_endian(MagicNumber))
        transform(begin(spirv), end(spirv), begin(spirv), [](uint32_t c) { return swap_endian(c); });

    if (s[0] != MagicNumber || !is_valid_spirv_version(s[1]))
        SPIRV_CROSS_THROW("Invalid SPIRV format.");
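
    // The five-word module header is: magic number, version, generator magic,
    // ID bound, and a reserved schema word. Every ID in the module is smaller
    // than the bound read from word 3.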
    uint32_t bound = s[3];

    const uint32_t MaximumNumberOfIDs = 0x3fffff;
    if (bound > MaximumNumberOfIDs)
        SPIRV_CROSS_THROW("ID bound exceeds limit of 0x3fffff.\n");

    ir.set_id_bounds(bound);

    uint32_t offset = 5;

    SmallVector<Instruction> instructions;
    while (offset < len)
    {
        Instruction instr = {};
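        // The first word of each instruction packs the total word count into
        // the high 16 bits and the opcode into the low 16 bits.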
        instr.op = spirv[offset] & 0xffff;
        instr.count = (spirv[offset] >> 16) & 0xffff;

        if (instr.count == 0)
            SPIRV_CROSS_THROW("SPIR-V instructions cannot consume 0 words. Invalid SPIR-V file.");

        instr.offset = offset + 1;
        instr.length = instr.count - 1;

        offset += instr.count;

        if (offset > spirv.size())
            SPIRV_CROSS_THROW("SPIR-V instruction goes out of bounds.");

        instructions.push_back(instr);
    }

    for (auto &i : instructions)
        parse(i);
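
    // OpTypeForwardPointer declares a pointer type before its pointee struct
    // exists, so type information copied from such pointers is patched up here
    // once every type declaration has been parsed.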
    for (auto &fixup : forward_pointer_fixups)
    {
        auto &target = get<SPIRType>(fixup.first);
        auto &source = get<SPIRType>(fixup.second);
        target.member_types = source.member_types;
        target.basetype = source.basetype;
        target.self = source.self;
    }
    forward_pointer_fixups.clear();

    if (current_function)
        SPIRV_CROSS_THROW("Function was not terminated.");
    if (current_block)
        SPIRV_CROSS_THROW("Block was not terminated.");
    if (ir.default_entry_point == 0)
        SPIRV_CROSS_THROW("There is no entry point in the SPIR-V module.");
}

const uint32_t *Parser::stream(const Instruction &instr) const
{
    // If we're not going to use any arguments, just return nullptr.
    // We want to avoid the case where we return an out-of-range pointer
    // that trips debug assertions on some platforms.
    if (!instr.length)
        return nullptr;

    if (instr.offset + instr.length > ir.spirv.size())
        SPIRV_CROSS_THROW("Compiler::stream() out of range.");
    return &ir.spirv[instr.offset];
}
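
// SPIR-V literal strings are UTF-8 bytes packed little-endian into 32-bit
// words and terminated by a nul byte; a string always consumes a whole number
// of words.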
static string extract_string(const vector<uint32_t> &spirv, uint32_t offset)
{
    string ret;
    for (uint32_t i = offset; i < spirv.size(); i++)
    {
        uint32_t w = spirv[i];

        for (uint32_t j = 0; j < 4; j++, w >>= 8)
        {
            char c = w & 0xff;
            if (c == '\0')
                return ret;
            ret += c;
        }
    }

    SPIRV_CROSS_THROW("String was not terminated before EOF");
}

void Parser::parse(const Instruction &instruction)
{
    auto *ops = stream(instruction);
    auto op = static_cast<Op>(instruction.op);
    uint32_t length = instruction.length;

    // HACK for glslang that might emit OpEmitMeshTasksEXT followed by return / branch.
    // Instead of failing hard, just ignore it.
    if (ignore_trailing_block_opcodes)
    {
        ignore_trailing_block_opcodes = false;
        if (op == OpReturn || op == OpBranch || op == OpUnreachable)
            return;
    }

    switch (op)
    {
    case OpSourceContinued:
    case OpSourceExtension:
    case OpNop:
    case OpModuleProcessed:
        break;

    case OpString:
    {
        set<SPIRString>(ops[0], extract_string(ir.spirv, instruction.offset + 1));
        break;
    }

    case OpMemoryModel:
        ir.addressing_model = static_cast<AddressingModel>(ops[0]);
        ir.memory_model = static_cast<MemoryModel>(ops[1]);
        break;

    case OpSource:
    {
        auto lang = static_cast<SourceLanguage>(ops[0]);
        switch (lang)
        {
        case SourceLanguageESSL:
            ir.source.es = true;
            ir.source.version = ops[1];
            ir.source.known = true;
            ir.source.hlsl = false;
            break;

        case SourceLanguageGLSL:
            ir.source.es = false;
            ir.source.version = ops[1];
            ir.source.known = true;
            ir.source.hlsl = false;
            break;

        case SourceLanguageHLSL:
            // For purposes of cross-compiling, this is GLSL 450.
            ir.source.es = false;
            ir.source.version = 450;
            ir.source.known = true;
            ir.source.hlsl = true;
            break;

        default:
            ir.source.known = false;
            break;
        }
        break;
    }

    case OpUndef:
    {
        uint32_t result_type = ops[0];
        uint32_t id = ops[1];
        set<SPIRUndef>(id, result_type);
        if (current_block)
            current_block->ops.push_back(instruction);
        break;
    }

    case OpCapability:
    {
        uint32_t cap = ops[0];
        if (cap == CapabilityKernel)
            SPIRV_CROSS_THROW("Kernel capability not supported.");

        ir.declared_capabilities.push_back(static_cast<Capability>(ops[0]));
        break;
    }

    case OpExtension:
    {
        auto ext = extract_string(ir.spirv, instruction.offset);
        ir.declared_extensions.push_back(std::move(ext));
        break;
    }

    case OpExtInstImport:
    {
        uint32_t id = ops[0];

        SPIRExtension::Extension spirv_ext = SPIRExtension::Unsupported;

        auto ext = extract_string(ir.spirv, instruction.offset + 1);
        if (ext == "GLSL.std.450")
            spirv_ext = SPIRExtension::GLSL;
        else if (ext == "DebugInfo")
            spirv_ext = SPIRExtension::SPV_debug_info;
        else if (ext == "SPV_AMD_shader_ballot")
            spirv_ext = SPIRExtension::SPV_AMD_shader_ballot;
        else if (ext == "SPV_AMD_shader_explicit_vertex_parameter")
            spirv_ext = SPIRExtension::SPV_AMD_shader_explicit_vertex_parameter;
        else if (ext == "SPV_AMD_shader_trinary_minmax")
            spirv_ext = SPIRExtension::SPV_AMD_shader_trinary_minmax;
        else if (ext == "SPV_AMD_gcn_shader")
            spirv_ext = SPIRExtension::SPV_AMD_gcn_shader;
        else if (ext == "NonSemantic.DebugPrintf")
            spirv_ext = SPIRExtension::NonSemanticDebugPrintf;
        else if (ext == "NonSemantic.Shader.DebugInfo.100")
            spirv_ext = SPIRExtension::NonSemanticShaderDebugInfo;
        else if (ext.find("NonSemantic.") == 0)
            spirv_ext = SPIRExtension::NonSemanticGeneric;

        set<SPIRExtension>(id, spirv_ext);
        // Other SPIR-V extensions which have ExtInstrs are currently not supported.

        break;
    }

    case OpExtInst:
    {
        // The SPIR-V debug information extended instructions might come at global scope.
        if (current_block)
        {
            current_block->ops.push_back(instruction);
            if (length >= 2)
            {
                const auto *type = maybe_get<SPIRType>(ops[0]);
                if (type)
                    ir.load_type_width.insert({ ops[1], type->width });
            }
        }
        break;
    }

    case OpEntryPoint:
    {
        auto itr =
            ir.entry_points.insert(make_pair(ops[1], SPIREntryPoint(ops[1], static_cast<ExecutionModel>(ops[0]),
                                                                    extract_string(ir.spirv, instruction.offset + 2))));
        auto &e = itr.first->second;

        // Strings need nul-terminator and consume the whole word.
        uint32_t strlen_words = uint32_t((e.name.size() + 1 + 3) >> 2);
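
        // Whatever follows the execution model, the entry-point ID and the
        // name string is the list of interface variable IDs.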
        for (uint32_t i = strlen_words + 2; i < instruction.length; i++)
            e.interface_variables.push_back(ops[i]);

        // Set the name of the entry point in case OpName is not provided later.
        ir.set_name(ops[1], e.name);

        // If we don't have an entry, make the first one our "default".
        if (!ir.default_entry_point)
            ir.default_entry_point = ops[1];
        break;
    }

    case OpExecutionMode:
    {
        auto &execution = ir.entry_points[ops[0]];
        auto mode = static_cast<ExecutionMode>(ops[1]);
        execution.flags.set(mode);

        switch (mode)
        {
        case ExecutionModeInvocations:
            execution.invocations = ops[2];
            break;

        case ExecutionModeLocalSize:
            execution.workgroup_size.x = ops[2];
            execution.workgroup_size.y = ops[3];
            execution.workgroup_size.z = ops[4];
            break;

        case ExecutionModeOutputVertices:
            execution.output_vertices = ops[2];
            break;

        case ExecutionModeOutputPrimitivesEXT:
            execution.output_primitives = ops[2];
            break;

        default:
            break;
        }
        break;
    }

    case OpExecutionModeId:
    {
        auto &execution = ir.entry_points[ops[0]];
        auto mode = static_cast<ExecutionMode>(ops[1]);
        execution.flags.set(mode);

        if (mode == ExecutionModeLocalSizeId)
        {
            execution.workgroup_size.id_x = ops[2];
            execution.workgroup_size.id_y = ops[3];
            execution.workgroup_size.id_z = ops[4];
        }

        break;
    }

    case OpName:
    {
        uint32_t id = ops[0];
        ir.set_name(id, extract_string(ir.spirv, instruction.offset + 1));
        break;
    }

    case OpMemberName:
    {
        uint32_t id = ops[0];
        uint32_t member = ops[1];
        ir.set_member_name(id, member, extract_string(ir.spirv, instruction.offset + 2));
        break;
    }

    case OpDecorationGroup:
    {
        // Noop, this simply means an ID should be a collector of decorations.
        // The meta array is already a flat array of decorations which will contain the relevant decorations.
        break;
    }

    case OpGroupDecorate:
    {
        uint32_t group_id = ops[0];
        auto &decorations = ir.meta[group_id].decoration;
        auto &flags = decorations.decoration_flags;

        // Copies decorations from one ID to another. Only copy decorations which are set in the group,
        // i.e., we cannot just copy the meta structure directly.
        for (uint32_t i = 1; i < length; i++)
        {
            uint32_t target = ops[i];
            flags.for_each_bit([&](uint32_t bit) {
                auto decoration = static_cast<Decoration>(bit);

                if (decoration_is_string(decoration))
                {
                    ir.set_decoration_string(target, decoration, ir.get_decoration_string(group_id, decoration));
                }
                else
                {
                    ir.meta[target].decoration_word_offset[decoration] =
                        ir.meta[group_id].decoration_word_offset[decoration];
                    ir.set_decoration(target, decoration, ir.get_decoration(group_id, decoration));
                }
            });
        }
        break;
    }

    case OpGroupMemberDecorate:
    {
        uint32_t group_id = ops[0];
        auto &flags = ir.meta[group_id].decoration.decoration_flags;

        // Copies decorations from one ID to another. Only copy decorations which are set in the group,
        // i.e., we cannot just copy the meta structure directly.
        for (uint32_t i = 1; i + 1 < length; i += 2)
        {
            uint32_t target = ops[i + 0];
            uint32_t index = ops[i + 1];
            flags.for_each_bit([&](uint32_t bit) {
                auto decoration = static_cast<Decoration>(bit);

                if (decoration_is_string(decoration))
                    ir.set_member_decoration_string(target, index, decoration,
                                                    ir.get_decoration_string(group_id, decoration));
                else
                    ir.set_member_decoration(target, index, decoration, ir.get_decoration(group_id, decoration));
            });
        }
        break;
    }

    case OpDecorate:
    case OpDecorateId:
    {
        // OpDecorateId technically supports an array of arguments, but our only supported decorations are single uint,
        // so merge decorate and decorate-id here.
        uint32_t id = ops[0];

        auto decoration = static_cast<Decoration>(ops[1]);
        if (length >= 3)
        {
            ir.meta[id].decoration_word_offset[decoration] = uint32_t(&ops[2] - ir.spirv.data());
            ir.set_decoration(id, decoration, ops[2]);
        }
        else
            ir.set_decoration(id, decoration);

        break;
    }

    case OpDecorateStringGOOGLE:
    {
        uint32_t id = ops[0];
        auto decoration = static_cast<Decoration>(ops[1]);
        ir.set_decoration_string(id, decoration, extract_string(ir.spirv, instruction.offset + 2));
        break;
    }

    case OpMemberDecorate:
    {
        uint32_t id = ops[0];
        uint32_t member = ops[1];
        auto decoration = static_cast<Decoration>(ops[2]);
        if (length >= 4)
            ir.set_member_decoration(id, member, decoration, ops[3]);
        else
            ir.set_member_decoration(id, member, decoration);
        break;
    }

    case OpMemberDecorateStringGOOGLE:
    {
        uint32_t id = ops[0];
        uint32_t member = ops[1];
        auto decoration = static_cast<Decoration>(ops[2]);
        ir.set_member_decoration_string(id, member, decoration, extract_string(ir.spirv, instruction.offset + 3));
        break;
    }

    // Build up basic types.
    case OpTypeVoid:
    {
        uint32_t id = ops[0];
        auto &type = set<SPIRType>(id, op);
        type.basetype = SPIRType::Void;
        break;
    }

    case OpTypeBool:
    {
        uint32_t id = ops[0];
        auto &type = set<SPIRType>(id, op);
        type.basetype = SPIRType::Boolean;
        type.width = 1;
        break;
    }

    case OpTypeFloat:
    {
        uint32_t id = ops[0];
        uint32_t width = ops[1];
        auto &type = set<SPIRType>(id, op);
        if (width == 64)
            type.basetype = SPIRType::Double;
        else if (width == 32)
            type.basetype = SPIRType::Float;
        else if (width == 16)
            type.basetype = SPIRType::Half;
        else
            SPIRV_CROSS_THROW("Unrecognized bit-width of floating point type.");
        type.width = width;
        break;
    }

    case OpTypeInt:
    {
        uint32_t id = ops[0];
        uint32_t width = ops[1];
        bool signedness = ops[2] != 0;
        auto &type = set<SPIRType>(id, op);
        type.basetype = signedness ? to_signed_basetype(width) : to_unsigned_basetype(width);
        type.width = width;
        break;
    }

    // Build composite types by "inheriting".
    // NOTE: The self member is also copied! For pointers and array modifiers this is a good thing
    // since we can refer to decorations on pointee classes which is needed for UBO/SSBO, I/O blocks in geometry/tess etc.
    case OpTypeVector:
    {
        uint32_t id = ops[0];
        uint32_t vecsize = ops[2];

        auto &base = get<SPIRType>(ops[1]);
        auto &vecbase = set<SPIRType>(id, base);

        vecbase.op = op;
        vecbase.vecsize = vecsize;
        vecbase.self = id;
        vecbase.parent_type = ops[1];
        break;
    }

    case OpTypeMatrix:
    {
        uint32_t id = ops[0];
        uint32_t colcount = ops[2];

        auto &base = get<SPIRType>(ops[1]);
        auto &matrixbase = set<SPIRType>(id, base);

        matrixbase.op = op;
        matrixbase.columns = colcount;
        matrixbase.self = id;
        matrixbase.parent_type = ops[1];
        break;
    }

    case OpTypeArray:
    {
        uint32_t id = ops[0];
        uint32_t tid = ops[1];
        auto &base = get<SPIRType>(tid);
        auto &arraybase = set<SPIRType>(id, base);

        arraybase.op = op;
        arraybase.parent_type = tid;

        uint32_t cid = ops[2];
        ir.mark_used_as_array_length(cid);
        auto *c = maybe_get<SPIRConstant>(cid);
        bool literal = c && !c->specialization;

        // We're copying type information into Array types, so we'll need a fixup for any physical pointer
        // references.
        if (base.forward_pointer)
            forward_pointer_fixups.push_back({ id, tid });
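
        // A literal array size is resolved immediately; a specialization
        // constant size is stored as the constant's ID so it can be
        // re-evaluated once the constant is known.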
        arraybase.array_size_literal.push_back(literal);
        arraybase.array.push_back(literal ? c->scalar() : cid);

        // .self resolves down to non-array/non-pointer type.
        arraybase.self = base.self;
        break;
    }

    case OpTypeRuntimeArray:
    {
        uint32_t id = ops[0];

        auto &base = get<SPIRType>(ops[1]);
        auto &arraybase = set<SPIRType>(id, base);

        // We're copying type information into Array types, so we'll need a fixup for any physical pointer
        // references.
        if (base.forward_pointer)
            forward_pointer_fixups.push_back({ id, ops[1] });

        arraybase.op = op;
        arraybase.array.push_back(0);
        arraybase.array_size_literal.push_back(true);
        arraybase.parent_type = ops[1];

        // .self resolves down to non-array/non-pointer type.
        arraybase.self = base.self;
        break;
    }

    case OpTypeImage:
    {
        uint32_t id = ops[0];
        auto &type = set<SPIRType>(id, op);
        type.basetype = SPIRType::Image;
        type.image.type = ops[1];
        type.image.dim = static_cast<Dim>(ops[2]);
        type.image.depth = ops[3] == 1;
        type.image.arrayed = ops[4] != 0;
        type.image.ms = ops[5] != 0;
        type.image.sampled = ops[6];
        type.image.format = static_cast<ImageFormat>(ops[7]);
        type.image.access = (length >= 9) ? static_cast<AccessQualifier>(ops[8]) : AccessQualifierMax;
        break;
    }

    case OpTypeSampledImage:
    {
        uint32_t id = ops[0];
        uint32_t imagetype = ops[1];
        auto &type = set<SPIRType>(id, op);
        type = get<SPIRType>(imagetype);
        type.basetype = SPIRType::SampledImage;
        type.self = id;
        break;
    }

    case OpTypeSampler:
    {
        uint32_t id = ops[0];
        auto &type = set<SPIRType>(id, op);
        type.basetype = SPIRType::Sampler;
        break;
    }

    case OpTypePointer:
    {
        uint32_t id = ops[0];

        // Very rarely, we might receive a FunctionPrototype here.
        // We won't be able to compile it, but we shouldn't crash when parsing.
        // We should be able to reflect.
        auto *base = maybe_get<SPIRType>(ops[2]);
        auto &ptrbase = set<SPIRType>(id, op);

        if (base)
        {
            ptrbase = *base;
            ptrbase.op = op;
        }

        ptrbase.pointer = true;
        ptrbase.pointer_depth++;
        ptrbase.storage = static_cast<StorageClass>(ops[1]);

        if (ptrbase.storage == StorageClassAtomicCounter)
            ptrbase.basetype = SPIRType::AtomicCounter;

        if (base && base->forward_pointer)
            forward_pointer_fixups.push_back({ id, ops[2] });

        ptrbase.parent_type = ops[2];

        // Do NOT set ptrbase.self!
        break;
    }

    case OpTypeForwardPointer:
    {
        uint32_t id = ops[0];
        auto &ptrbase = set<SPIRType>(id, op);
        ptrbase.pointer = true;
        ptrbase.pointer_depth++;
        ptrbase.storage = static_cast<StorageClass>(ops[1]);
        ptrbase.forward_pointer = true;

        if (ptrbase.storage == StorageClassAtomicCounter)
            ptrbase.basetype = SPIRType::AtomicCounter;

        break;
    }

    case OpTypeStruct:
    {
        uint32_t id = ops[0];
        auto &type = set<SPIRType>(id, op);
        type.basetype = SPIRType::Struct;
        for (uint32_t i = 1; i < length; i++)
            type.member_types.push_back(ops[i]);

        // Check if we have seen this struct type before, with just different
        // decorations.
        //
        // Add workaround for issue #17 as well by looking at OpName for the struct
        // types, which we shouldn't normally do.
        // We should not normally have to consider type aliases like this to begin with
        // however ... glslang issues #304, #307 cover this.

        // For stripped names, never consider struct type aliasing.
        // We risk declaring the same struct multiple times, but type-punning is not allowed
        // so this is safe.
        bool consider_aliasing = !ir.get_name(type.self).empty();
        if (consider_aliasing)
        {
            for (auto &other : global_struct_cache)
            {
                if (ir.get_name(type.self) == ir.get_name(other) &&
                    types_are_logically_equivalent(type, get<SPIRType>(other)))
                {
                    type.type_alias = other;
                    break;
                }
            }

            if (type.type_alias == TypeID(0))
                global_struct_cache.push_back(id);
        }
        break;
    }

    case OpTypeFunction:
    {
        uint32_t id = ops[0];
        uint32_t ret = ops[1];

        auto &func = set<SPIRFunctionPrototype>(id, ret);
        for (uint32_t i = 2; i < length; i++)
            func.parameter_types.push_back(ops[i]);
        break;
    }

    case OpTypeAccelerationStructureKHR:
    {
        uint32_t id = ops[0];
        auto &type = set<SPIRType>(id, op);
        type.basetype = SPIRType::AccelerationStructure;
        break;
    }

    case OpTypeRayQueryKHR:
    {
        uint32_t id = ops[0];
        auto &type = set<SPIRType>(id, op);
        type.basetype = SPIRType::RayQuery;
        break;
    }

    // Variable declaration
    // All variables are essentially pointers with a storage qualifier.
    case OpVariable:
    {
        uint32_t type = ops[0];
        uint32_t id = ops[1];
        auto storage = static_cast<StorageClass>(ops[2]);
        uint32_t initializer = length == 4 ? ops[3] : 0;

        if (storage == StorageClassFunction)
        {
            if (!current_function)
                SPIRV_CROSS_THROW("No function currently in scope");
            current_function->add_local_variable(id);
        }

        set<SPIRVariable>(id, type, storage, initializer);
        break;
    }

    // OpPhi
    // OpPhi is a fairly magical opcode.
    // It selects temporary variables based on which parent block we *came from*.
    // In high-level languages we can "de-SSA" by creating a function local, and flush out temporaries to this function-local
    // variable to emulate SSA Phi.
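    // For illustration only: a phi such as
    //     %x = OpPhi %int %a %blockA %b %blockB
    // roughly becomes a function-local "int x;", with "x = a;" flushed at the
    // end of blockA and "x = b;" at the end of blockB.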
    case OpPhi:
    {
        if (!current_function)
            SPIRV_CROSS_THROW("No function currently in scope");
        if (!current_block)
            SPIRV_CROSS_THROW("No block currently in scope");

        uint32_t result_type = ops[0];
        uint32_t id = ops[1];

        // Instead of a temporary, create a new function-wide temporary with this ID instead.
        auto &var = set<SPIRVariable>(id, result_type, spv::StorageClassFunction);
        var.phi_variable = true;

        current_function->add_local_variable(id);

        for (uint32_t i = 2; i + 2 <= length; i += 2)
            current_block->phi_variables.push_back({ ops[i], ops[i + 1], id });
        break;
    }

    // Constants
    case OpSpecConstant:
    case OpConstant:
    {
        uint32_t id = ops[1];
        auto &type = get<SPIRType>(ops[0]);
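
        // Literals wider than 32 bits span two operand words, low-order word
        // first.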
        if (type.width > 32)
            set<SPIRConstant>(id, ops[0], ops[2] | (uint64_t(ops[3]) << 32), op == OpSpecConstant);
        else
            set<SPIRConstant>(id, ops[0], ops[2], op == OpSpecConstant);
        break;
    }

    case OpSpecConstantFalse:
    case OpConstantFalse:
    {
        uint32_t id = ops[1];
        set<SPIRConstant>(id, ops[0], uint32_t(0), op == OpSpecConstantFalse);
        break;
    }

    case OpSpecConstantTrue:
    case OpConstantTrue:
    {
        uint32_t id = ops[1];
        set<SPIRConstant>(id, ops[0], uint32_t(1), op == OpSpecConstantTrue);
        break;
    }

    case OpConstantNull:
    {
        uint32_t id = ops[1];
        uint32_t type = ops[0];
        ir.make_constant_null(id, type, true);
        break;
    }

    case OpSpecConstantComposite:
    case OpConstantComposite:
    {
        uint32_t id = ops[1];
        uint32_t type = ops[0];

        auto &ctype = get<SPIRType>(type);

        // We can have constants which are structs and arrays.
        // In this case, our SPIRConstant will be a list of other SPIRConstant ids which we
        // can refer to.
        if (ctype.basetype == SPIRType::Struct || !ctype.array.empty())
        {
            set<SPIRConstant>(id, type, ops + 2, length - 2, op == OpSpecConstantComposite);
        }
        else
        {
            uint32_t elements = length - 2;
            if (elements > 4)
                SPIRV_CROSS_THROW("OpConstantComposite only supports 1, 2, 3 and 4 elements.");

            SPIRConstant remapped_constant_ops[4];
            const SPIRConstant *c[4];
            for (uint32_t i = 0; i < elements; i++)
            {
                // Specialization constants operations can also be part of this.
                // We do not know their value, so any attempt to query SPIRConstant later
                // will fail. We can only propagate the ID of the expression and use to_expression on it.
                auto *constant_op = maybe_get<SPIRConstantOp>(ops[2 + i]);
                auto *undef_op = maybe_get<SPIRUndef>(ops[2 + i]);
                if (constant_op)
                {
                    if (op == OpConstantComposite)
                        SPIRV_CROSS_THROW("Specialization constant operation used in OpConstantComposite.");

                    remapped_constant_ops[i].make_null(get<SPIRType>(constant_op->basetype));
                    remapped_constant_ops[i].self = constant_op->self;
                    remapped_constant_ops[i].constant_type = constant_op->basetype;
                    remapped_constant_ops[i].specialization = true;
                    c[i] = &remapped_constant_ops[i];
                }
                else if (undef_op)
                {
                    // Undefined, just pick 0.
                    remapped_constant_ops[i].make_null(get<SPIRType>(undef_op->basetype));
                    remapped_constant_ops[i].constant_type = undef_op->basetype;
                    c[i] = &remapped_constant_ops[i];
                }
                else
                    c[i] = &get<SPIRConstant>(ops[2 + i]);
            }
            set<SPIRConstant>(id, type, c, elements, op == OpSpecConstantComposite);
        }
        break;
    }

    // Functions
    case OpFunction:
    {
        uint32_t res = ops[0];
        uint32_t id = ops[1];
        // Control
        uint32_t type = ops[3];

        if (current_function)
            SPIRV_CROSS_THROW("Must end a function before starting a new one!");

        current_function = &set<SPIRFunction>(id, res, type);
        break;
    }

    case OpFunctionParameter:
    {
        uint32_t type = ops[0];
        uint32_t id = ops[1];

        if (!current_function)
            SPIRV_CROSS_THROW("Must be in a function!");

        current_function->add_parameter(type, id);
        set<SPIRVariable>(id, type, StorageClassFunction);
        break;
    }

    case OpFunctionEnd:
    {
        if (current_block)
        {
            // Very specific error message, but seems to come up quite often.
            SPIRV_CROSS_THROW(
                "Cannot end a function before ending the current block.\n"
                "Likely cause: If this SPIR-V was created from glslang HLSL, make sure the entry point is valid.");
        }
        current_function = nullptr;
        break;
    }

    // Blocks
    case OpLabel:
    {
        // OpLabel always starts a block.
        if (!current_function)
            SPIRV_CROSS_THROW("Blocks cannot exist outside functions!");

        uint32_t id = ops[0];

        current_function->blocks.push_back(id);
        if (!current_function->entry_block)
            current_function->entry_block = id;

        if (current_block)
            SPIRV_CROSS_THROW("Cannot start a block before ending the current block.");

        current_block = &set<SPIRBlock>(id);
        break;
    }

    // Branch instructions end blocks.
    case OpBranch:
    {
        if (!current_block)
            SPIRV_CROSS_THROW("Trying to end a non-existing block.");

        uint32_t target = ops[0];
        current_block->terminator = SPIRBlock::Direct;
        current_block->next_block = target;
        current_block = nullptr;
        break;
    }

    case OpBranchConditional:
    {
        if (!current_block)
            SPIRV_CROSS_THROW("Trying to end a non-existing block.");

        current_block->condition = ops[0];
        current_block->true_block = ops[1];
        current_block->false_block = ops[2];

        current_block->terminator = SPIRBlock::Select;

        if (current_block->true_block == current_block->false_block)
        {
            // Bogus conditional, translate to a direct branch.
            // Avoids some ugly edge cases later when analyzing CFGs.

            // There are some super jank cases where the merge block is different from the true/false,
            // and later branches can "break" out of the selection construct this way.
            // This is complete nonsense, but CTS hits this case.
            // In this scenario, we should see the selection construct as more of a Switch with one default case.
            // The problem here is that this breaks any attempt to break out of outer switch statements,
            // but it's theoretically solvable if this ever comes up using the ladder breaking system ...

            if (current_block->true_block != current_block->next_block &&
                current_block->merge == SPIRBlock::MergeSelection)
            {
                uint32_t ids = ir.increase_bound_by(2);

                auto &type = set<SPIRType>(ids, OpTypeInt);
                type.basetype = SPIRType::Int;
                type.width = 32;
                auto &c = set<SPIRConstant>(ids + 1, ids);

                current_block->condition = c.self;
                current_block->default_block = current_block->true_block;
                current_block->terminator = SPIRBlock::MultiSelect;
                ir.block_meta[current_block->next_block] &= ~ParsedIR::BLOCK_META_SELECTION_MERGE_BIT;
                ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT;
            }
            else
            {
                // Collapse loops if we have to.
                bool collapsed_loop = current_block->true_block == current_block->merge_block &&
                                      current_block->merge == SPIRBlock::MergeLoop;

                if (collapsed_loop)
                {
                    ir.block_meta[current_block->merge_block] &= ~ParsedIR::BLOCK_META_LOOP_MERGE_BIT;
                    ir.block_meta[current_block->continue_block] &= ~ParsedIR::BLOCK_META_CONTINUE_BIT;
                }

                current_block->next_block = current_block->true_block;
                current_block->condition = 0;
                current_block->true_block = 0;
                current_block->false_block = 0;
                current_block->merge_block = 0;
                current_block->merge = SPIRBlock::MergeNone;
                current_block->terminator = SPIRBlock::Direct;
            }
        }

        current_block = nullptr;
        break;
    }

    case OpSwitch:
    {
        if (!current_block)
            SPIRV_CROSS_THROW("Trying to end a non-existing block.");

        current_block->terminator = SPIRBlock::MultiSelect;

        current_block->condition = ops[0];
        current_block->default_block = ops[1];
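
        // The width of the case literals matches the width of the selector,
        // which may not be resolvable yet. Record both the two-words-per-case
        // (32-bit literal) and three-words-per-case (64-bit literal) readings
        // and let the compiler pick the correct one once types are known.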
        uint32_t remaining_ops = length - 2;
        if ((remaining_ops % 2) == 0)
        {
            for (uint32_t i = 2; i + 2 <= length; i += 2)
                current_block->cases_32bit.push_back({ ops[i], ops[i + 1] });
        }

        if ((remaining_ops % 3) == 0)
        {
            for (uint32_t i = 2; i + 3 <= length; i += 3)
            {
                uint64_t value = (static_cast<uint64_t>(ops[i + 1]) << 32) | ops[i];
                current_block->cases_64bit.push_back({ value, ops[i + 2] });
            }
        }

        // If we jump to next block, make it break instead since we're inside a switch case block at that point.
        ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT;

        current_block = nullptr;
        break;
    }

    case OpKill:
    case OpTerminateInvocation:
    {
        if (!current_block)
            SPIRV_CROSS_THROW("Trying to end a non-existing block.");
        current_block->terminator = SPIRBlock::Kill;
        current_block = nullptr;
        break;
    }

    case OpTerminateRayKHR:
        // NV variant is not a terminator.
        if (!current_block)
            SPIRV_CROSS_THROW("Trying to end a non-existing block.");
        current_block->terminator = SPIRBlock::TerminateRay;
        current_block = nullptr;
        break;

    case OpIgnoreIntersectionKHR:
        // NV variant is not a terminator.
        if (!current_block)
            SPIRV_CROSS_THROW("Trying to end a non-existing block.");
        current_block->terminator = SPIRBlock::IgnoreIntersection;
        current_block = nullptr;
        break;

    case OpEmitMeshTasksEXT:
        if (!current_block)
            SPIRV_CROSS_THROW("Trying to end a non-existing block.");
        current_block->terminator = SPIRBlock::EmitMeshTasks;
        for (uint32_t i = 0; i < 3; i++)
            current_block->mesh.groups[i] = ops[i];
        current_block->mesh.payload = length >= 4 ? ops[3] : 0;
        current_block = nullptr;
        // Currently glslang is bugged and does not treat EmitMeshTasksEXT as a terminator.
        ignore_trailing_block_opcodes = true;
        break;

    case OpReturn:
    {
        if (!current_block)
            SPIRV_CROSS_THROW("Trying to end a non-existing block.");
        current_block->terminator = SPIRBlock::Return;
        current_block = nullptr;
        break;
    }

    case OpReturnValue:
    {
        if (!current_block)
            SPIRV_CROSS_THROW("Trying to end a non-existing block.");
        current_block->terminator = SPIRBlock::Return;
        current_block->return_value = ops[0];
        current_block = nullptr;
        break;
    }

    case OpUnreachable:
    {
        if (!current_block)
            SPIRV_CROSS_THROW("Trying to end a non-existing block.");
        current_block->terminator = SPIRBlock::Unreachable;
        current_block = nullptr;
        break;
    }

    case OpSelectionMerge:
    {
        if (!current_block)
            SPIRV_CROSS_THROW("Trying to modify a non-existing block.");

        current_block->next_block = ops[0];
        current_block->merge = SPIRBlock::MergeSelection;
        ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_SELECTION_MERGE_BIT;

        if (length >= 2)
        {
            if (ops[1] & SelectionControlFlattenMask)
                current_block->hint = SPIRBlock::HintFlatten;
            else if (ops[1] & SelectionControlDontFlattenMask)
                current_block->hint = SPIRBlock::HintDontFlatten;
        }
        break;
    }

    case OpLoopMerge:
    {
        if (!current_block)
            SPIRV_CROSS_THROW("Trying to modify a non-existing block.");

        current_block->merge_block = ops[0];
        current_block->continue_block = ops[1];
        current_block->merge = SPIRBlock::MergeLoop;

        ir.block_meta[current_block->self] |= ParsedIR::BLOCK_META_LOOP_HEADER_BIT;
        ir.block_meta[current_block->merge_block] |= ParsedIR::BLOCK_META_LOOP_MERGE_BIT;

        ir.continue_block_to_loop_header[current_block->continue_block] = BlockID(current_block->self);

        // Don't add loop headers to continue blocks,
        // which would make it impossible to branch into the loop header since
        // they are treated as continues.
        if (current_block->continue_block != BlockID(current_block->self))
            ir.block_meta[current_block->continue_block] |= ParsedIR::BLOCK_META_CONTINUE_BIT;

        if (length >= 3)
        {
            if (ops[2] & LoopControlUnrollMask)
                current_block->hint = SPIRBlock::HintUnroll;
            else if (ops[2] & LoopControlDontUnrollMask)
                current_block->hint = SPIRBlock::HintDontUnroll;
        }
        break;
    }

    case OpSpecConstantOp:
    {
        if (length < 3)
            SPIRV_CROSS_THROW("OpSpecConstantOp not enough arguments.");

        uint32_t result_type = ops[0];
        uint32_t id = ops[1];
        auto spec_op = static_cast<Op>(ops[2]);

        set<SPIRConstantOp>(id, result_type, spec_op, ops + 3, length - 3);
        break;
    }

    case OpLine:
    {
        // OpLine might come at global scope, but we don't care about those since they will not be declared in any
        // meaningful correct order.
        // Ignore all OpLine directives which live outside a function.
        if (current_block)
            current_block->ops.push_back(instruction);

        // Line directives may arrive before first OpLabel.
        // Treat this as the line of the function declaration,
        // so warnings for arguments can propagate properly.
        if (current_function)
        {
            // Store the first one we find and emit it before creating the function prototype.
            if (current_function->entry_line.file_id == 0)
            {
                current_function->entry_line.file_id = ops[0];
                current_function->entry_line.line_literal = ops[1];
            }
        }
        break;
    }

    case OpNoLine:
    {
        // OpNoLine might come at global scope.
        if (current_block)
            current_block->ops.push_back(instruction);
        break;
    }

    // Actual opcodes.
    default:
    {
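        // Most value-producing opcodes start with <result type> <result id>;
        // record the result type's width here so that later passes can look up
        // load/store widths without resolving types again.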
        if (length >= 2)
        {
            const auto *type = maybe_get<SPIRType>(ops[0]);
            if (type)
                ir.load_type_width.insert({ ops[1], type->width });
        }

        if (!current_block)
            SPIRV_CROSS_THROW("Currently no block to insert opcode.");

        current_block->ops.push_back(instruction);
        break;
    }
    }
}

bool Parser::types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const
{
    if (a.basetype != b.basetype)
        return false;
    if (a.width != b.width)
        return false;
    if (a.vecsize != b.vecsize)
        return false;
    if (a.columns != b.columns)
        return false;
    if (a.array.size() != b.array.size())
        return false;

    size_t array_count = a.array.size();
    if (array_count && memcmp(a.array.data(), b.array.data(), array_count * sizeof(uint32_t)) != 0)
        return false;

    if (a.basetype == SPIRType::Image || a.basetype == SPIRType::SampledImage)
    {
        if (memcmp(&a.image, &b.image, sizeof(SPIRType::Image)) != 0)
            return false;
    }

    if (a.member_types.size() != b.member_types.size())
        return false;

    size_t member_types = a.member_types.size();
    for (size_t i = 0; i < member_types; i++)
    {
        if (!types_are_logically_equivalent(get<SPIRType>(a.member_types[i]), get<SPIRType>(b.member_types[i])))
            return false;
    }

    return true;
}

bool Parser::variable_storage_is_aliased(const SPIRVariable &v) const
{
    auto &type = get<SPIRType>(v.basetype);

    auto *type_meta = ir.find_meta(type.self);

    bool ssbo = v.storage == StorageClassStorageBuffer ||
                (type_meta && type_meta->decoration.decoration_flags.get(DecorationBufferBlock));
    bool image = type.basetype == SPIRType::Image;
    bool counter = type.basetype == SPIRType::AtomicCounter;

    bool is_restrict;
    if (ssbo)
        is_restrict = ir.get_buffer_block_flags(v).get(DecorationRestrict);
    else
        is_restrict = ir.has_decoration(v.self, DecorationRestrict);

    return !is_restrict && (ssbo || image || counter);
}
} // namespace SPIRV_CROSS_NAMESPACE