Merge pull request #813 from KhronosGroup/fix-781

Optimize SPIR-V ID traversal, making it more correct and more robust against large ID bounds.
Hans-Kristian Arntzen 2019-01-11 12:52:11 +01:00 committed by GitHub
commit e4e53e8443
78 changed files with 1312 additions and 1214 deletions
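The core of the change shows up in the spirv_cfg diffs near the end of this page: per-block CFG state that used to live in vectors sized by the module's ID bound (preceding_edges, succeeding_edges, visit_order, immediate_dominators) now lives in hash maps keyed by block ID, so memory use scales with the blocks actually referenced rather than with an arbitrarily large ID bound. A minimal sketch of the before/after pattern (simplified for illustration; not the exact SPIRV-Cross code):

```cpp
#include <cstdint>
#include <unordered_map>
#include <vector>

// Before: dense per-ID storage, allocated up to the ID bound.
// A module that declares a huge ID bound forces equally huge allocations.
struct DenseCFGState
{
    explicit DenseCFGState(uint32_t id_bound)
        : succeeding_edges(id_bound), visit_order(id_bound, -1)
    {
    }
    std::vector<std::vector<uint32_t>> succeeding_edges;
    std::vector<int> visit_order;
};

// After: sparse storage keyed by block ID. Only blocks that are actually
// referenced get an entry, so a large ID bound costs nothing by itself.
struct SparseCFGState
{
    std::unordered_map<uint32_t, std::vector<uint32_t>> succeeding_edges;
    std::unordered_map<uint32_t, int> visit_order;
};
```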

View File

@ -2,11 +2,11 @@
#define SPIRV_CROSS_CONSTANT_ID_201 -10
#endif
static const int _7 = SPIRV_CROSS_CONSTANT_ID_201;
static const int _20 = (_7 + 2);
#ifndef SPIRV_CROSS_CONSTANT_ID_202
#define SPIRV_CROSS_CONSTANT_ID_202 100u
#endif
static const uint _8 = SPIRV_CROSS_CONSTANT_ID_202;
static const int _20 = (_7 + 2);
static const uint _25 = (_8 % 5u);
#ifndef SPIRV_CROSS_CONSTANT_ID_0
#define SPIRV_CROSS_CONSTANT_ID_0 int4(20, 30, _20, _20)

View File

@ -1,15 +1,16 @@
#ifndef SPIRV_CROSS_CONSTANT_ID_0
#define SPIRV_CROSS_CONSTANT_ID_0 4.0f
#endif
static const float X = SPIRV_CROSS_CONSTANT_ID_0;
static const uint3 gl_WorkGroupSize = uint3(2u, 1u, 1u);
struct Data
{
float a;
float b;
};
#ifndef SPIRV_CROSS_CONSTANT_ID_0
#define SPIRV_CROSS_CONSTANT_ID_0 4.0f
#endif
static const float X = SPIRV_CROSS_CONSTANT_ID_0;
static const uint3 gl_WorkGroupSize = uint3(2u, 1u, 1u);
static const Data _21 = { 1.0f, 2.0f };
static const Data _24 = { 3.0f, 4.0f };
static const Data _25[2] = { { 1.0f, 2.0f }, { 3.0f, 4.0f } };
@ -29,14 +30,13 @@ struct SPIRV_Cross_Input
void comp_main()
{
Data data[2] = _25;
Data _28 = { X, 2.0f };
Data _31[2] = { _28, _30 };
Data data2[2] = _31;
if (gl_LocalInvocationIndex == 0u)
{
_61.Store(gl_WorkGroupID.x * 8 + 0, asuint(data[gl_LocalInvocationID.x].a + data2[gl_LocalInvocationID.x].a));
_61.Store(gl_WorkGroupID.x * 8 + 4, asuint(data[gl_LocalInvocationID.x].b + data2[gl_LocalInvocationID.x].b));
_61.Store(gl_WorkGroupID.x * 8 + 0, asuint(_25[gl_LocalInvocationID.x].a + data2[gl_LocalInvocationID.x].a));
_61.Store(gl_WorkGroupID.x * 8 + 4, asuint(_25[gl_LocalInvocationID.x].b + data2[gl_LocalInvocationID.x].b));
}
}

View File

@ -6,15 +6,6 @@ static const int a = SPIRV_CROSS_CONSTANT_ID_0;
#define SPIRV_CROSS_CONSTANT_ID_1 200
#endif
static const int b = SPIRV_CROSS_CONSTANT_ID_1;
#ifndef SPIRV_CROSS_CONSTANT_ID_2
#define SPIRV_CROSS_CONSTANT_ID_2 300
#endif
static const int c = SPIRV_CROSS_CONSTANT_ID_2;
static const int _18 = (c + 50);
#ifndef SPIRV_CROSS_CONSTANT_ID_3
#define SPIRV_CROSS_CONSTANT_ID_3 400
#endif
static const int e = SPIRV_CROSS_CONSTANT_ID_3;
struct A
{
@ -28,6 +19,16 @@ struct B
int member1[a];
};
#ifndef SPIRV_CROSS_CONSTANT_ID_2
#define SPIRV_CROSS_CONSTANT_ID_2 300
#endif
static const int c = SPIRV_CROSS_CONSTANT_ID_2;
static const int _18 = (c + 50);
#ifndef SPIRV_CROSS_CONSTANT_ID_3
#define SPIRV_CROSS_CONSTANT_ID_3 400
#endif
static const int e = SPIRV_CROSS_CONSTANT_ID_3;
RWByteAddressBuffer _22 : register(u0);
static uint3 gl_GlobalInvocationID;

View File

@ -24,9 +24,8 @@ struct SPIRV_Cross_Output
void frag_main()
{
Foo foos[2] = _28;
FragColor = _16[_line].xxxx;
FragColor += (foos[_line].a * foos[1 - _line].a).xxxx;
FragColor += (_28[_line].a * _28[1 - _line].a).xxxx;
}
SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input)

View File

@ -3,17 +3,17 @@
using namespace metal;
struct SSBO
{
float a;
};
constant uint _5_tmp [[function_constant(10)]];
constant uint _5 = is_function_constant_defined(_5_tmp) ? _5_tmp : 9u;
constant uint _6_tmp [[function_constant(12)]];
constant uint _6 = is_function_constant_defined(_6_tmp) ? _6_tmp : 4u;
constant uint3 gl_WorkGroupSize = uint3(_5, 20u, _6);
struct SSBO
{
float a;
};
kernel void main0(device SSBO& _4 [[buffer(0)]])
{
_4.a += 1.0;

View File

@ -3,17 +3,17 @@
using namespace metal;
struct _6
{
float _m0[1];
};
constant uint _3_tmp [[function_constant(0)]];
constant uint _3 = is_function_constant_defined(_3_tmp) ? _3_tmp : 1u;
constant uint _4_tmp [[function_constant(2)]];
constant uint _4 = is_function_constant_defined(_4_tmp) ? _4_tmp : 3u;
constant uint3 gl_WorkGroupSize = uint3(_3, 2u, _4);
struct _6
{
float _m0[1];
};
kernel void main0(device _6& _8 [[buffer(0)]], device _6& _9 [[buffer(1)]], uint3 gl_WorkGroupID [[threadgroup_position_in_grid]])
{
_8._m0[gl_WorkGroupID.x] = _9._m0[gl_WorkGroupID.x] + _8._m0[gl_WorkGroupID.x];

View File

@ -3,13 +3,13 @@
using namespace metal;
constant uint3 gl_WorkGroupSize = uint3(16u, 16u, 1u);
struct cb1_struct
{
float4 _m0[1];
};
constant uint3 gl_WorkGroupSize = uint3(16u, 16u, 1u);
kernel void main0(constant cb1_struct& cb0_1 [[buffer(0)]], texture2d<float, access::write> u0 [[texture(1)]], uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]])
{
int2 _46 = int2(u0.get_width(), u0.get_height()) >> int2(uint2(4u));

View File

@ -3,13 +3,13 @@
using namespace metal;
constant uint3 gl_WorkGroupSize = uint3(16u, 16u, 1u);
struct cb1_struct
{
float4 _m0[1];
};
constant uint3 gl_WorkGroupSize = uint3(16u, 16u, 1u);
kernel void main0(constant cb1_struct& cb0_1 [[buffer(0)]], texture2d<float, access::write> u0 [[texture(1)]], uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]])
{
int2 _40 = int2(u0.get_width(), u0.get_height()) >> int2(uint2(4u));

View File

@ -3,6 +3,11 @@
using namespace metal;
struct _28
{
float4 _m0;
};
struct _6
{
float4 _m0;
@ -83,11 +88,6 @@ struct _18
float4 _m38[2];
};
struct _28
{
float4 _m0;
};
constant _28 _74 = {};
struct main0_out

View File

@ -5,9 +5,9 @@ using namespace metal;
constant int _7_tmp [[function_constant(201)]];
constant int _7 = is_function_constant_defined(_7_tmp) ? _7_tmp : -10;
constant int _20 = (_7 + 2);
constant uint _8_tmp [[function_constant(202)]];
constant uint _8 = is_function_constant_defined(_8_tmp) ? _8_tmp : 100u;
constant int _20 = (_7 + 2);
constant uint _25 = (_8 % 5u);
constant int4 _30 = int4(20, 30, _20, _20);
constant int2 _32 = int2(_30.y, _30.x);

View File

@ -5,10 +5,6 @@
using namespace metal;
constant float X_tmp [[function_constant(0)]];
constant float X = is_function_constant_defined(X_tmp) ? X_tmp : 4.0;
constant uint3 gl_WorkGroupSize = uint3(2u, 1u, 1u);
struct Data
{
float a;
@ -21,12 +17,17 @@ struct Data_1
float b;
};
constant float X_tmp [[function_constant(0)]];
constant float X = is_function_constant_defined(X_tmp) ? X_tmp : 4.0;
struct SSBO
{
Data_1 outdata[1];
Data outdata[1];
};
constant Data _25[2] = { Data{ 1.0, 2.0 }, Data{ 3.0, 4.0 } };
constant uint3 gl_WorkGroupSize = uint3(2u, 1u, 1u);
constant Data_1 _25[2] = { Data_1{ 1.0, 2.0 }, Data_1{ 3.0, 4.0 } };
// Implementation of an array copy function to cover GLSL's ability to copy an array via assignment.
template<typename T, uint N>
@ -43,11 +44,10 @@ void spvArrayCopyFromConstant1(thread T (&dst)[N], constant T (&src)[N])
kernel void main0(device SSBO& _53 [[buffer(0)]], uint3 gl_WorkGroupID [[threadgroup_position_in_grid]], uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]])
{
Data data[2] = { Data{ 1.0, 2.0 }, Data{ 3.0, 4.0 } };
Data _31[2] = { Data{ X, 2.0 }, Data{ 3.0, 5.0 } };
Data data2[2];
Data_1 _31[2] = { Data_1{ X, 2.0 }, Data_1{ 3.0, 5.0 } };
Data_1 data2[2];
spvArrayCopyFromStack1(data2, _31);
_53.outdata[gl_WorkGroupID.x].a = data[gl_LocalInvocationID.x].a + data2[gl_LocalInvocationID.x].a;
_53.outdata[gl_WorkGroupID.x].b = data[gl_LocalInvocationID.x].b + data2[gl_LocalInvocationID.x].b;
_53.outdata[gl_WorkGroupID.x].a = _25[gl_LocalInvocationID.x].a + data2[gl_LocalInvocationID.x].a;
_53.outdata[gl_WorkGroupID.x].b = _25[gl_LocalInvocationID.x].b + data2[gl_LocalInvocationID.x].b;
}

View File

@ -6,8 +6,6 @@
using namespace metal;
constant uint3 gl_WorkGroupSize = uint3(4u, 1u, 1u);
struct SSBO
{
float in_data[1];
@ -23,6 +21,8 @@ struct SSBO3
uint count;
};
constant uint3 gl_WorkGroupSize = uint3(4u, 1u, 1u);
kernel void main0(const device SSBO& _22 [[buffer(0)]], device SSBO2& _38 [[buffer(1)]], device SSBO3& _41 [[buffer(2)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]])
{
float _28 = _22.in_data[gl_GlobalInvocationID.x];

View File

@ -3,8 +3,6 @@
using namespace metal;
constant uint3 gl_WorkGroupSize = uint3(32u, 1u, 1u);
struct T1
{
packed_float3 a;
@ -21,6 +19,8 @@ struct Buffer1
float buf1[1];
};
constant uint3 gl_WorkGroupSize = uint3(32u, 1u, 1u);
kernel void main0(device Buffer0& _15 [[buffer(1)]], device Buffer1& _34 [[buffer(2)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]])
{
_34.buf1[gl_GlobalInvocationID.x] = _15.buf0[0].b;

View File

@ -3,8 +3,6 @@
using namespace metal;
constant uint3 gl_WorkGroupSize = uint3(32u, 1u, 1u);
struct T1
{
packed_float3 a;
@ -21,6 +19,8 @@ struct Buffer1
float buf1[1];
};
constant uint3 gl_WorkGroupSize = uint3(32u, 1u, 1u);
kernel void main0(device Buffer0& _14 [[buffer(1)]], device Buffer1& _24 [[buffer(2)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]])
{
_24.buf1[gl_GlobalInvocationID.x] = _14.buf0[0].b;

View File

@ -3,13 +3,13 @@
using namespace metal;
constant uint3 gl_WorkGroupSize = uint3(4u, 4u, 1u);
struct SSBO
{
float out_data[1];
};
constant uint3 gl_WorkGroupSize = uint3(4u, 4u, 1u);
kernel void main0(device SSBO& _67 [[buffer(0)]], uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]], uint gl_LocalInvocationIndex [[thread_index_in_threadgroup]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]])
{
threadgroup float foo[4][4];

View File

@ -3,8 +3,6 @@
using namespace metal;
constant uint3 gl_WorkGroupSize = uint3(4u, 1u, 1u);
struct SSBO
{
float in_data[1];
@ -15,6 +13,8 @@ struct SSBO2
float out_data[1];
};
constant uint3 gl_WorkGroupSize = uint3(4u, 1u, 1u);
kernel void main0(const device SSBO& _22 [[buffer(0)]], device SSBO2& _44 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]], uint gl_LocalInvocationIndex [[thread_index_in_threadgroup]])
{
threadgroup float sShared[4];

View File

@ -11,13 +11,6 @@ constant int a = SPIRV_CROSS_CONSTANT_ID_0;
#define SPIRV_CROSS_CONSTANT_ID_1 200
#endif
constant int b = SPIRV_CROSS_CONSTANT_ID_1;
#ifndef SPIRV_CROSS_CONSTANT_ID_2
#define SPIRV_CROSS_CONSTANT_ID_2 300
#endif
constant int c = SPIRV_CROSS_CONSTANT_ID_2;
constant int _18 = (c + 50);
constant int e_tmp [[function_constant(3)]];
constant int e = is_function_constant_defined(e_tmp) ? e_tmp : 400;
struct A
{
@ -31,6 +24,12 @@ struct B
int member1[a];
};
#ifndef SPIRV_CROSS_CONSTANT_ID_2
#define SPIRV_CROSS_CONSTANT_ID_2 300
#endif
constant int c = SPIRV_CROSS_CONSTANT_ID_2;
constant int _18 = (c + 50);
struct SSBO
{
A member_a;
@ -39,6 +38,9 @@ struct SSBO
int w[_18];
};
constant int e_tmp [[function_constant(3)]];
constant int e = is_function_constant_defined(e_tmp) ? e_tmp : 400;
kernel void main0(device SSBO& _22 [[buffer(0)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]])
{
_22.w[gl_GlobalInvocationID.x] += (_22.v[gl_GlobalInvocationID.x] + e);

View File

@ -9,6 +9,12 @@ using namespace metal;
constant int b = SPIRV_CROSS_CONSTANT_ID_1;
constant int a_tmp [[function_constant(0)]];
constant int a = is_function_constant_defined(a_tmp) ? a_tmp : 1;
struct SSBO
{
int v[1];
};
constant uint _21 = (uint(a) + 0u);
constant uint _22_tmp [[function_constant(10)]];
constant uint _22 = is_function_constant_defined(_22_tmp) ? _22_tmp : 1u;
@ -19,11 +25,6 @@ constant uint _29 = gl_WorkGroupSize.y;
constant uint _30 = (_28 + _29);
constant int _32 = (1 - a);
struct SSBO
{
int v[1];
};
kernel void main0(device SSBO& _17 [[buffer(0)]])
{
int spec_const_array_size[b];

View File

@ -1,5 +1,3 @@
#pragma clang diagnostic ignored "-Wmissing-prototypes"
#include <metal_stdlib>
#include <simd/simd.h>
@ -27,24 +25,10 @@ struct main0_in
int index [[user(locn0)]];
};
// Implementation of an array copy function to cover GLSL's ability to copy an array via assignment.
template<typename T, uint N>
void spvArrayCopyFromStack1(thread T (&dst)[N], thread const T (&src)[N])
{
for (uint i = 0; i < N; dst[i] = src[i], i++);
}
template<typename T, uint N>
void spvArrayCopyFromConstant1(thread T (&dst)[N], constant T (&src)[N])
{
for (uint i = 0; i < N; dst[i] = src[i], i++);
}
fragment main0_out main0(main0_in in [[stage_in]])
{
main0_out out = {};
Foobar indexable[2] = { Foobar{ 10.0, 40.0 }, Foobar{ 90.0, 70.0 } };
out.FragColor = ((_37[in.index] + _55[in.index][in.index + 1]) + float4(30.0)) + float4(indexable[in.index].a + indexable[in.index].b);
out.FragColor = ((_37[in.index] + _55[in.index][in.index + 1]) + float4(30.0)) + float4(_75[in.index].a + _75[in.index].b);
return out;
}

View File

@ -1,5 +1,3 @@
#pragma clang diagnostic ignored "-Wmissing-prototypes"
#include <metal_stdlib>
#include <simd/simd.h>
@ -24,25 +22,11 @@ struct main0_in
int line [[user(locn0)]];
};
// Implementation of an array copy function to cover GLSL's ability to copy an array via assignment.
template<typename T, uint N>
void spvArrayCopyFromStack1(thread T (&dst)[N], thread const T (&src)[N])
{
for (uint i = 0; i < N; dst[i] = src[i], i++);
}
template<typename T, uint N>
void spvArrayCopyFromConstant1(thread T (&dst)[N], constant T (&src)[N])
{
for (uint i = 0; i < N; dst[i] = src[i], i++);
}
fragment main0_out main0(main0_in in [[stage_in]])
{
main0_out out = {};
Foo foos[2] = { Foo{ 10.0, 20.0 }, Foo{ 30.0, 40.0 } };
out.FragColor = float4(_16[in.line]);
out.FragColor += float4(foos[in.line].a * foos[1 - in.line].a);
out.FragColor += float4(_28[in.line].a * _28[1 - in.line].a);
return out;
}

View File

@ -3,11 +3,6 @@
using namespace metal;
#ifndef SPIRV_CROSS_CONSTANT_ID_0
#define SPIRV_CROSS_CONSTANT_ID_0 3
#endif
constant int arraySize = SPIRV_CROSS_CONSTANT_ID_0;
struct storage_block
{
uint4 baz;
@ -20,6 +15,11 @@ struct constant_block
int bar;
};
#ifndef SPIRV_CROSS_CONSTANT_ID_0
#define SPIRV_CROSS_CONSTANT_ID_0 3
#endif
constant int arraySize = SPIRV_CROSS_CONSTANT_ID_0;
vertex void main0(device storage_block* storage_0 [[buffer(0)]], device storage_block* storage_1 [[buffer(1)]], constant constant_block* constants_0 [[buffer(2)]], constant constant_block* constants_1 [[buffer(3)]], constant constant_block* constants_2 [[buffer(4)]], constant constant_block* constants_3 [[buffer(5)]], array<texture2d<int>, 3> images [[texture(0)]])
{
device storage_block* storage[] =

View File

@ -3,11 +3,6 @@
using namespace metal;
#ifndef SPIRV_CROSS_CONSTANT_ID_0
#define SPIRV_CROSS_CONSTANT_ID_0 3
#endif
constant int arraySize = SPIRV_CROSS_CONSTANT_ID_0;
struct storage_block
{
uint4 baz;
@ -20,6 +15,11 @@ struct constant_block
int bar;
};
#ifndef SPIRV_CROSS_CONSTANT_ID_0
#define SPIRV_CROSS_CONSTANT_ID_0 3
#endif
constant int arraySize = SPIRV_CROSS_CONSTANT_ID_0;
vertex void main0(device storage_block* storage_0 [[buffer(0)]], device storage_block* storage_1 [[buffer(1)]], constant constant_block* constants_0 [[buffer(2)]], constant constant_block* constants_1 [[buffer(3)]], constant constant_block* constants_2 [[buffer(4)]], constant constant_block* constants_3 [[buffer(5)]], array<texture2d<int>, 3> images [[texture(0)]])
{
device storage_block* storage[] =

View File

@ -4,11 +4,11 @@
#define SPIRV_CROSS_CONSTANT_ID_201 -10
#endif
const int _7 = SPIRV_CROSS_CONSTANT_ID_201;
const int _20 = (_7 + 2);
#ifndef SPIRV_CROSS_CONSTANT_ID_202
#define SPIRV_CROSS_CONSTANT_ID_202 100u
#endif
const uint _8 = SPIRV_CROSS_CONSTANT_ID_202;
const int _20 = (_7 + 2);
const uint _25 = (_8 % 5u);
const ivec4 _30 = ivec4(20, 30, _20, _20);
const ivec2 _32 = ivec2(_30.y, _30.x);

View File

@ -1,8 +1,8 @@
#version 450
layout(constant_id = 201) const int _7 = -10;
layout(constant_id = 202) const uint _8 = 100u;
const int _20 = (_7 + 2);
layout(constant_id = 202) const uint _8 = 100u;
const uint _25 = (_8 % 5u);
const ivec4 _30 = ivec4(20, 30, _20, _20);
const ivec2 _32 = ivec2(_30.y, _30.x);

View File

@ -1,17 +1,18 @@
#version 310 es
layout(local_size_x = 2, local_size_y = 1, local_size_z = 1) in;
#ifndef SPIRV_CROSS_CONSTANT_ID_0
#define SPIRV_CROSS_CONSTANT_ID_0 4.0
#endif
const float X = SPIRV_CROSS_CONSTANT_ID_0;
struct Data
{
float a;
float b;
};
const Data _25[2] = Data[](Data(1.0, 2.0), Data(3.0, 4.0));
#ifndef SPIRV_CROSS_CONSTANT_ID_0
#define SPIRV_CROSS_CONSTANT_ID_0 4.0
#endif
const float X = SPIRV_CROSS_CONSTANT_ID_0;
layout(binding = 0, std430) buffer SSBO
{
Data outdata[];
@ -19,9 +20,8 @@ layout(binding = 0, std430) buffer SSBO
void main()
{
Data data[2] = Data[](Data(1.0, 2.0), Data(3.0, 4.0));
Data data2[2] = Data[](Data(X, 2.0), Data(3.0, 5.0));
_53.outdata[gl_WorkGroupID.x].a = data[gl_LocalInvocationID.x].a + data2[gl_LocalInvocationID.x].a;
_53.outdata[gl_WorkGroupID.x].b = data[gl_LocalInvocationID.x].b + data2[gl_LocalInvocationID.x].b;
_53.outdata[gl_WorkGroupID.x].a = _25[gl_LocalInvocationID.x].a + data2[gl_LocalInvocationID.x].a;
_53.outdata[gl_WorkGroupID.x].b = _25[gl_LocalInvocationID.x].b + data2[gl_LocalInvocationID.x].b;
}

View File

@ -2,21 +2,21 @@
precision mediump float;
precision highp int;
const vec4 _37[3] = vec4[](vec4(1.0), vec4(2.0), vec4(3.0));
const vec4 _55[2][2] = vec4[][](vec4[](vec4(1.0), vec4(2.0)), vec4[](vec4(8.0), vec4(10.0)));
struct Foobar
{
float a;
float b;
};
const vec4 _37[3] = vec4[](vec4(1.0), vec4(2.0), vec4(3.0));
const vec4 _55[2][2] = vec4[][](vec4[](vec4(1.0), vec4(2.0)), vec4[](vec4(8.0), vec4(10.0)));
const Foobar _75[2] = Foobar[](Foobar(10.0, 40.0), Foobar(90.0, 70.0));
layout(location = 0) out vec4 FragColor;
layout(location = 0) flat in mediump int index;
void main()
{
Foobar indexable[2] = Foobar[](Foobar(10.0, 40.0), Foobar(90.0, 70.0));
FragColor = ((_37[index] + _55[index][index + 1]) + vec4(30.0)) + vec4(indexable[index].a + indexable[index].b);
FragColor = ((_37[index] + _55[index][index + 1]) + vec4(30.0)) + vec4(_75[index].a + _75[index].b);
}

View File

@ -10,13 +10,14 @@ struct Foo
float b;
};
const Foo _28[2] = Foo[](Foo(10.0, 20.0), Foo(30.0, 40.0));
layout(location = 0) out vec4 FragColor;
layout(location = 0) flat in mediump int line;
void main()
{
Foo foos[2] = Foo[](Foo(10.0, 20.0), Foo(30.0, 40.0));
FragColor = vec4(_16[line]);
FragColor += vec4(foos[line].a * foos[1 - line].a);
FragColor += vec4(_28[line].a * _28[1 - line].a);
}

View File

@ -9,15 +9,6 @@ const int a = SPIRV_CROSS_CONSTANT_ID_0;
#define SPIRV_CROSS_CONSTANT_ID_1 200
#endif
const int b = SPIRV_CROSS_CONSTANT_ID_1;
#ifndef SPIRV_CROSS_CONSTANT_ID_2
#define SPIRV_CROSS_CONSTANT_ID_2 300
#endif
const int c = SPIRV_CROSS_CONSTANT_ID_2;
const int _18 = (c + 50);
#ifndef SPIRV_CROSS_CONSTANT_ID_3
#define SPIRV_CROSS_CONSTANT_ID_3 400
#endif
const int e = SPIRV_CROSS_CONSTANT_ID_3;
struct A
{
@ -31,6 +22,16 @@ struct B
int member1[a];
};
#ifndef SPIRV_CROSS_CONSTANT_ID_2
#define SPIRV_CROSS_CONSTANT_ID_2 300
#endif
const int c = SPIRV_CROSS_CONSTANT_ID_2;
const int _18 = (c + 50);
#ifndef SPIRV_CROSS_CONSTANT_ID_3
#define SPIRV_CROSS_CONSTANT_ID_3 400
#endif
const int e = SPIRV_CROSS_CONSTANT_ID_3;
layout(binding = 0, std430) buffer SSBO
{
A member_a;

View File

@ -3,9 +3,6 @@ layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
layout(constant_id = 0) const int a = 100;
layout(constant_id = 1) const int b = 200;
layout(constant_id = 2) const int c = 300;
const int _18 = (c + 50);
layout(constant_id = 3) const int e = 400;
struct A
{
@ -19,6 +16,10 @@ struct B
int member1[a];
};
layout(constant_id = 2) const int c = 300;
const int _18 = (c + 50);
layout(constant_id = 3) const int e = 400;
layout(set = 1, binding = 0, std430) buffer SSBO
{
A member_a;

View File

@ -2,15 +2,11 @@
#define SPIRV_CROSS_CONSTANT_ID_201 -10
#endif
static const int _7 = SPIRV_CROSS_CONSTANT_ID_201;
static const int _20 = (_7 + 2);
#ifndef SPIRV_CROSS_CONSTANT_ID_202
#define SPIRV_CROSS_CONSTANT_ID_202 100u
#endif
static const uint _8 = SPIRV_CROSS_CONSTANT_ID_202;
#ifndef SPIRV_CROSS_CONSTANT_ID_200
#define SPIRV_CROSS_CONSTANT_ID_200 3.141590118408203125f
#endif
static const float _9 = SPIRV_CROSS_CONSTANT_ID_200;
static const int _20 = (_7 + 2);
static const uint _25 = (_8 % 5u);
#ifndef SPIRV_CROSS_CONSTANT_ID_0
#define SPIRV_CROSS_CONSTANT_ID_0 int4(20, 30, _20, _20)
@ -18,6 +14,10 @@ static const uint _25 = (_8 % 5u);
static const int4 _30 = SPIRV_CROSS_CONSTANT_ID_0;
static const int2 _32 = int2(_30.y, _30.x);
static const int _33 = _30.y;
#ifndef SPIRV_CROSS_CONSTANT_ID_200
#define SPIRV_CROSS_CONSTANT_ID_200 3.141590118408203125f
#endif
static const float _9 = SPIRV_CROSS_CONSTANT_ID_200;
static float4 gl_Position;
static int _4;

View File

@ -1,15 +1,16 @@
#ifndef SPIRV_CROSS_CONSTANT_ID_0
#define SPIRV_CROSS_CONSTANT_ID_0 4.0f
#endif
static const float X = SPIRV_CROSS_CONSTANT_ID_0;
static const uint3 gl_WorkGroupSize = uint3(2u, 1u, 1u);
struct Data
{
float a;
float b;
};
#ifndef SPIRV_CROSS_CONSTANT_ID_0
#define SPIRV_CROSS_CONSTANT_ID_0 4.0f
#endif
static const float X = SPIRV_CROSS_CONSTANT_ID_0;
static const uint3 gl_WorkGroupSize = uint3(2u, 1u, 1u);
static const Data _21 = { 1.0f, 2.0f };
static const Data _24 = { 3.0f, 4.0f };
static const Data _25[2] = { { 1.0f, 2.0f }, { 3.0f, 4.0f } };

View File

@ -6,15 +6,6 @@ static const int a = SPIRV_CROSS_CONSTANT_ID_0;
#define SPIRV_CROSS_CONSTANT_ID_1 200
#endif
static const int b = SPIRV_CROSS_CONSTANT_ID_1;
#ifndef SPIRV_CROSS_CONSTANT_ID_2
#define SPIRV_CROSS_CONSTANT_ID_2 300
#endif
static const int c = SPIRV_CROSS_CONSTANT_ID_2;
static const int _18 = (c + 50);
#ifndef SPIRV_CROSS_CONSTANT_ID_3
#define SPIRV_CROSS_CONSTANT_ID_3 400
#endif
static const int e = SPIRV_CROSS_CONSTANT_ID_3;
struct A
{
@ -28,6 +19,16 @@ struct B
int member1[a];
};
#ifndef SPIRV_CROSS_CONSTANT_ID_2
#define SPIRV_CROSS_CONSTANT_ID_2 300
#endif
static const int c = SPIRV_CROSS_CONSTANT_ID_2;
static const int _18 = (c + 50);
#ifndef SPIRV_CROSS_CONSTANT_ID_3
#define SPIRV_CROSS_CONSTANT_ID_3 400
#endif
static const int e = SPIRV_CROSS_CONSTANT_ID_3;
RWByteAddressBuffer _22 : register(u0);
static uint3 gl_GlobalInvocationID;

View File

@ -3,17 +3,17 @@
using namespace metal;
struct SSBO
{
float a;
};
constant uint _5_tmp [[function_constant(10)]];
constant uint _5 = is_function_constant_defined(_5_tmp) ? _5_tmp : 9u;
constant uint _6_tmp [[function_constant(12)]];
constant uint _6 = is_function_constant_defined(_6_tmp) ? _6_tmp : 4u;
constant uint3 gl_WorkGroupSize = uint3(_5, 20u, _6);
struct SSBO
{
float a;
};
kernel void main0(device SSBO& _4 [[buffer(0)]])
{
_4.a += 1.0;

View File

@ -3,17 +3,17 @@
using namespace metal;
struct _6
{
float _m0[1];
};
constant uint _3_tmp [[function_constant(0)]];
constant uint _3 = is_function_constant_defined(_3_tmp) ? _3_tmp : 1u;
constant uint _4_tmp [[function_constant(2)]];
constant uint _4 = is_function_constant_defined(_4_tmp) ? _4_tmp : 3u;
constant uint3 gl_WorkGroupSize = uint3(_3, 2u, _4);
struct _6
{
float _m0[1];
};
kernel void main0(device _6& _8 [[buffer(0)]], device _6& _9 [[buffer(1)]], uint3 gl_WorkGroupID [[threadgroup_position_in_grid]])
{
_8._m0[gl_WorkGroupID.x] = _9._m0[gl_WorkGroupID.x] + _8._m0[gl_WorkGroupID.x];

View File

@ -5,13 +5,13 @@
using namespace metal;
constant uint3 gl_WorkGroupSize = uint3(16u, 16u, 1u);
struct cb1_struct
{
float4 _m0[1];
};
constant uint3 gl_WorkGroupSize = uint3(16u, 16u, 1u);
int2 get_texcoord(thread const int2& base, thread const int2& index, thread uint3& gl_LocalInvocationID)
{
return (base * int3(gl_LocalInvocationID).xy) + index;

View File

@ -3,13 +3,13 @@
using namespace metal;
constant uint3 gl_WorkGroupSize = uint3(16u, 16u, 1u);
struct cb1_struct
{
float4 _m0[1];
};
constant uint3 gl_WorkGroupSize = uint3(16u, 16u, 1u);
kernel void main0(constant cb1_struct& cb0_1 [[buffer(0)]], texture2d<float, access::write> u0 [[texture(1)]], uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]])
{
int2 r0 = int2(int2(u0.get_width(), u0.get_height()) >> int2(uint2(4u)));

View File

@ -7,19 +7,19 @@ using namespace metal;
struct Foo
{
float3 a;
packed_float3 a;
float b;
};
struct Foo_1
{
packed_float3 a;
float3 a;
float b;
};
struct buf
{
Foo_1 results[16];
Foo results[16];
float4 bar;
};
@ -31,7 +31,7 @@ struct main0_out
float4 _main(thread const float4& pos, constant buf& v_11)
{
int _46 = int(pos.x) % 16;
Foo foo;
Foo_1 foo;
foo.a = v_11.results[_46].a;
foo.b = v_11.results[_46].b;
return float4(dot(foo.a, v_11.bar.xyz), foo.b, 0.0, 0.0);

View File

@ -3,6 +3,11 @@
using namespace metal;
struct _28
{
float4 _m0;
};
struct _6
{
float4 _m0;
@ -113,11 +118,6 @@ struct _21
float4 _m0;
};
struct _28
{
float4 _m0;
};
constant _28 _74 = {};
struct main0_out

View File

@ -5,15 +5,15 @@ using namespace metal;
constant int _7_tmp [[function_constant(201)]];
constant int _7 = is_function_constant_defined(_7_tmp) ? _7_tmp : -10;
constant int _20 = (_7 + 2);
constant uint _8_tmp [[function_constant(202)]];
constant uint _8 = is_function_constant_defined(_8_tmp) ? _8_tmp : 100u;
constant float _9_tmp [[function_constant(200)]];
constant float _9 = is_function_constant_defined(_9_tmp) ? _9_tmp : 3.141590118408203125;
constant int _20 = (_7 + 2);
constant uint _25 = (_8 % 5u);
constant int4 _30 = int4(20, 30, _20, _20);
constant int2 _32 = int2(_30.y, _30.x);
constant int _33 = _30.y;
constant float _9_tmp [[function_constant(200)]];
constant float _9 = is_function_constant_defined(_9_tmp) ? _9_tmp : 3.141590118408203125;
struct main0_out
{

View File

@ -5,10 +5,6 @@
using namespace metal;
constant float X_tmp [[function_constant(0)]];
constant float X = is_function_constant_defined(X_tmp) ? X_tmp : 4.0;
constant uint3 gl_WorkGroupSize = uint3(2u, 1u, 1u);
struct Data
{
float a;
@ -21,12 +17,17 @@ struct Data_1
float b;
};
constant float X_tmp [[function_constant(0)]];
constant float X = is_function_constant_defined(X_tmp) ? X_tmp : 4.0;
struct SSBO
{
Data_1 outdata[1];
Data outdata[1];
};
constant Data _25[2] = { Data{ 1.0, 2.0 }, Data{ 3.0, 4.0 } };
constant uint3 gl_WorkGroupSize = uint3(2u, 1u, 1u);
constant Data_1 _25[2] = { Data_1{ 1.0, 2.0 }, Data_1{ 3.0, 4.0 } };
// Implementation of an array copy function to cover GLSL's ability to copy an array via assignment.
template<typename T, uint N>
@ -41,20 +42,20 @@ void spvArrayCopyFromConstant1(thread T (&dst)[N], constant T (&src)[N])
for (uint i = 0; i < N; dst[i] = src[i], i++);
}
Data combine(thread const Data& a, thread const Data& b)
Data_1 combine(thread const Data_1& a, thread const Data_1& b)
{
return Data{ a.a + b.a, a.b + b.b };
return Data_1{ a.a + b.a, a.b + b.b };
}
kernel void main0(device SSBO& _53 [[buffer(0)]], uint3 gl_WorkGroupID [[threadgroup_position_in_grid]], uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]])
{
Data data[2] = { Data{ 1.0, 2.0 }, Data{ 3.0, 4.0 } };
Data _31[2] = { Data{ X, 2.0 }, Data{ 3.0, 5.0 } };
Data data2[2];
Data_1 data[2] = { Data_1{ 1.0, 2.0 }, Data_1{ 3.0, 4.0 } };
Data_1 _31[2] = { Data_1{ X, 2.0 }, Data_1{ 3.0, 5.0 } };
Data_1 data2[2];
spvArrayCopyFromStack1(data2, _31);
Data param = data[gl_LocalInvocationID.x];
Data param_1 = data2[gl_LocalInvocationID.x];
Data _73 = combine(param, param_1);
Data_1 param = data[gl_LocalInvocationID.x];
Data_1 param_1 = data2[gl_LocalInvocationID.x];
Data_1 _73 = combine(param, param_1);
_53.outdata[gl_WorkGroupID.x].a = _73.a;
_53.outdata[gl_WorkGroupID.x].b = _73.b;
}

View File

@ -6,8 +6,6 @@
using namespace metal;
constant uint3 gl_WorkGroupSize = uint3(4u, 1u, 1u);
struct SSBO
{
float in_data[1];
@ -23,6 +21,8 @@ struct SSBO3
uint count;
};
constant uint3 gl_WorkGroupSize = uint3(4u, 1u, 1u);
kernel void main0(const device SSBO& _22 [[buffer(0)]], device SSBO2& _38 [[buffer(1)]], device SSBO3& _41 [[buffer(2)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]])
{
uint ident = gl_GlobalInvocationID.x;

View File

@ -3,23 +3,21 @@
using namespace metal;
constant uint3 gl_WorkGroupSize = uint3(32u, 1u, 1u);
struct T1
{
float3 a;
float b;
};
struct T1_1
{
packed_float3 a;
float b;
};
struct T1_1
{
float3 a;
float b;
};
struct Buffer0
{
T1_1 buf0[1];
T1 buf0[1];
};
struct Buffer1
@ -27,9 +25,11 @@ struct Buffer1
float buf1[1];
};
constant uint3 gl_WorkGroupSize = uint3(32u, 1u, 1u);
kernel void main0(device Buffer0& _15 [[buffer(1)]], device Buffer1& _34 [[buffer(2)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]])
{
T1 v;
T1_1 v;
v.a = _15.buf0[0].a;
v.b = _15.buf0[0].b;
float x = v.b;

View File

@ -3,8 +3,6 @@
using namespace metal;
constant uint3 gl_WorkGroupSize = uint3(32u, 1u, 1u);
struct T1
{
packed_float3 a;
@ -21,6 +19,8 @@ struct Buffer1
float buf1[1];
};
constant uint3 gl_WorkGroupSize = uint3(32u, 1u, 1u);
kernel void main0(device Buffer0& _14 [[buffer(1)]], device Buffer1& _24 [[buffer(2)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]])
{
float x = _14.buf0[0].b;

View File

@ -5,13 +5,13 @@
using namespace metal;
constant uint3 gl_WorkGroupSize = uint3(4u, 4u, 1u);
struct SSBO
{
float out_data[1];
};
constant uint3 gl_WorkGroupSize = uint3(4u, 4u, 1u);
void work(threadgroup float (&foo)[4][4], thread uint3& gl_LocalInvocationID, thread uint& gl_LocalInvocationIndex, device SSBO& v_67, thread uint3& gl_GlobalInvocationID)
{
foo[gl_LocalInvocationID.x][gl_LocalInvocationID.y] = float(gl_LocalInvocationIndex);

View File

@ -3,8 +3,6 @@
using namespace metal;
constant uint3 gl_WorkGroupSize = uint3(4u, 1u, 1u);
struct SSBO
{
float in_data[1];
@ -15,6 +13,8 @@ struct SSBO2
float out_data[1];
};
constant uint3 gl_WorkGroupSize = uint3(4u, 1u, 1u);
kernel void main0(const device SSBO& _22 [[buffer(0)]], device SSBO2& _44 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]], uint gl_LocalInvocationIndex [[thread_index_in_threadgroup]])
{
threadgroup float sShared[4];

View File

@ -11,13 +11,6 @@ constant int a = SPIRV_CROSS_CONSTANT_ID_0;
#define SPIRV_CROSS_CONSTANT_ID_1 200
#endif
constant int b = SPIRV_CROSS_CONSTANT_ID_1;
#ifndef SPIRV_CROSS_CONSTANT_ID_2
#define SPIRV_CROSS_CONSTANT_ID_2 300
#endif
constant int c = SPIRV_CROSS_CONSTANT_ID_2;
constant int _18 = (c + 50);
constant int e_tmp [[function_constant(3)]];
constant int e = is_function_constant_defined(e_tmp) ? e_tmp : 400;
struct A
{
@ -31,6 +24,12 @@ struct B
int member1[a];
};
#ifndef SPIRV_CROSS_CONSTANT_ID_2
#define SPIRV_CROSS_CONSTANT_ID_2 300
#endif
constant int c = SPIRV_CROSS_CONSTANT_ID_2;
constant int _18 = (c + 50);
struct SSBO
{
A member_a;
@ -39,6 +38,9 @@ struct SSBO
int w[_18];
};
constant int e_tmp [[function_constant(3)]];
constant int e = is_function_constant_defined(e_tmp) ? e_tmp : 400;
kernel void main0(device SSBO& _22 [[buffer(0)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]])
{
_22.w[gl_GlobalInvocationID.x] += (_22.v[gl_GlobalInvocationID.x] + e);

View File

@ -9,6 +9,12 @@ using namespace metal;
constant int b = SPIRV_CROSS_CONSTANT_ID_1;
constant int a_tmp [[function_constant(0)]];
constant int a = is_function_constant_defined(a_tmp) ? a_tmp : 1;
struct SSBO
{
int v[1];
};
constant uint _21 = (uint(a) + 0u);
constant uint _22_tmp [[function_constant(10)]];
constant uint _22 = is_function_constant_defined(_22_tmp) ? _22_tmp : 1u;
@ -19,11 +25,6 @@ constant uint _29 = gl_WorkGroupSize.y;
constant uint _30 = (_28 + _29);
constant int _32 = (1 - a);
struct SSBO
{
int v[1];
};
kernel void main0(device SSBO& _17 [[buffer(0)]])
{
int spec_const_array_size[b];

View File

@ -13,24 +13,24 @@ struct s2
s1 b;
};
struct s2_1
{
s1 b;
};
struct s1_1
{
int a;
};
struct s2_1
{
s1_1 b;
};
struct dstbuffer
{
s2_1 test[1];
s2 test[1];
};
kernel void main0(device dstbuffer& _19 [[buffer(1)]])
{
s2 testVal;
s2_1 testVal;
testVal.b.a = 0;
_19.test[0].b.a = testVal.b.a;
}

View File

@ -10,19 +10,14 @@ struct S0
float4 a;
};
struct S1
{
float4 a;
};
struct S0_1
{
float4 a;
};
struct SSBO0
struct S1
{
S0_1 s0s[1];
float4 a;
};
struct S1_1
@ -30,9 +25,14 @@ struct S1_1
float4 a;
};
struct SSBO0
{
S0 s0s[1];
};
struct SSBO1
{
S1_1 s1s[1];
S1 s1s[1];
};
struct SSBO2
@ -40,24 +40,24 @@ struct SSBO2
float4 outputs[1];
};
float4 overload(thread const S0& s0)
float4 overload(thread const S0_1& s0)
{
return s0.a;
}
float4 overload(thread const S1& s1)
float4 overload(thread const S1_1& s1)
{
return s1.a;
}
kernel void main0(device SSBO0& _36 [[buffer(0)]], device SSBO1& _55 [[buffer(1)]], device SSBO2& _66 [[buffer(2)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]])
{
S0 s0;
S0_1 s0;
s0.a = _36.s0s[gl_GlobalInvocationID.x].a;
S1 s1;
S1_1 s1;
s1.a = _55.s1s[gl_GlobalInvocationID.x].a;
S0 param = s0;
S1 param_1 = s1;
S0_1 param = s0;
S1_1 param_1 = s1;
_66.outputs[gl_GlobalInvocationID.x] = overload(param) + overload(param_1);
}

View File

@ -27,19 +27,6 @@ struct main0_in
int index [[user(locn0)]];
};
// Implementation of an array copy function to cover GLSL's ability to copy an array via assignment.
template<typename T, uint N>
void spvArrayCopyFromStack1(thread T (&dst)[N], thread const T (&src)[N])
{
for (uint i = 0; i < N; dst[i] = src[i], i++);
}
template<typename T, uint N>
void spvArrayCopyFromConstant1(thread T (&dst)[N], constant T (&src)[N])
{
for (uint i = 0; i < N; dst[i] = src[i], i++);
}
float4 resolve(thread const Foobar& f)
{
return float4(f.a + f.b);
@ -49,8 +36,7 @@ fragment main0_out main0(main0_in in [[stage_in]])
{
main0_out out = {};
Foobar param = Foobar{ 10.0, 20.0 };
Foobar indexable[2] = { Foobar{ 10.0, 40.0 }, Foobar{ 90.0, 70.0 } };
Foobar param_1 = indexable[in.index];
Foobar param_1 = _75[in.index];
out.FragColor = ((_37[in.index] + _55[in.index][in.index + 1]) + resolve(param)) + resolve(param_1);
return out;
}

View File

@ -12,19 +12,19 @@ struct VertexOutput
struct TestStruct
{
float3 position;
packed_float3 position;
float radius;
};
struct TestStruct_1
{
packed_float3 position;
float3 position;
float radius;
};
struct CB0
{
TestStruct_1 CB0[16];
TestStruct CB0[16];
};
struct main0_out
@ -34,7 +34,7 @@ struct main0_out
float4 _main(thread const VertexOutput& IN, constant CB0& v_26)
{
TestStruct st;
TestStruct_1 st;
st.position = v_26.CB0[1].position;
st.radius = v_26.CB0[1].radius;
float4 col = float4(st.position, st.radius);

View File

@ -5,11 +5,6 @@
using namespace metal;
#ifndef SPIRV_CROSS_CONSTANT_ID_0
#define SPIRV_CROSS_CONSTANT_ID_0 3
#endif
constant int arraySize = SPIRV_CROSS_CONSTANT_ID_0;
struct storage_block
{
uint4 baz;
@ -22,6 +17,11 @@ struct constant_block
int bar;
};
#ifndef SPIRV_CROSS_CONSTANT_ID_0
#define SPIRV_CROSS_CONSTANT_ID_0 3
#endif
constant int arraySize = SPIRV_CROSS_CONSTANT_ID_0;
void doWork(device storage_block* (&storage)[2], constant constant_block* (&constants)[4], thread const array<texture2d<int>, 3> images)
{
storage[0]->baz = uint4(constants[3]->foo);

View File

@ -3,11 +3,6 @@
using namespace metal;
#ifndef SPIRV_CROSS_CONSTANT_ID_0
#define SPIRV_CROSS_CONSTANT_ID_0 3
#endif
constant int arraySize = SPIRV_CROSS_CONSTANT_ID_0;
struct storage_block
{
uint4 baz;
@ -20,6 +15,11 @@ struct constant_block
int bar;
};
#ifndef SPIRV_CROSS_CONSTANT_ID_0
#define SPIRV_CROSS_CONSTANT_ID_0 3
#endif
constant int arraySize = SPIRV_CROSS_CONSTANT_ID_0;
vertex void main0(device storage_block* storage_0 [[buffer(0)]], device storage_block* storage_1 [[buffer(1)]], constant constant_block* constants_0 [[buffer(2)]], constant constant_block* constants_1 [[buffer(3)]], constant constant_block* constants_2 [[buffer(4)]], constant constant_block* constants_3 [[buffer(5)]], array<texture2d<int>, 3> images [[texture(0)]])
{
device storage_block* storage[] =

View File

@ -0,0 +1,25 @@
#version 450
struct Foo
{
vec4 a;
};
struct Bar
{
Foo foo;
Foo foo2;
};
layout(binding = 0, std140) uniform UBO
{
Bar bar;
} _7;
layout(location = 0) out vec4 FragColor;
void main()
{
FragColor = _7.bar.foo.a + _7.bar.foo2.a;
}

View File

@ -2,14 +2,14 @@
precision mediump float;
precision highp int;
const vec4 _14[4] = vec4[](vec4(0.0), vec4(0.0), vec4(0.0), vec4(0.0));
struct D
{
vec4 a;
float b;
};
const vec4 _14[4] = vec4[](vec4(0.0), vec4(0.0), vec4(0.0), vec4(0.0));
layout(location = 0) out float FragColor;
void main()

View File

@ -4,19 +4,19 @@
#define SPIRV_CROSS_CONSTANT_ID_201 -10
#endif
const int _7 = SPIRV_CROSS_CONSTANT_ID_201;
const int _20 = (_7 + 2);
#ifndef SPIRV_CROSS_CONSTANT_ID_202
#define SPIRV_CROSS_CONSTANT_ID_202 100u
#endif
const uint _8 = SPIRV_CROSS_CONSTANT_ID_202;
#ifndef SPIRV_CROSS_CONSTANT_ID_200
#define SPIRV_CROSS_CONSTANT_ID_200 3.141590118408203125
#endif
const float _9 = SPIRV_CROSS_CONSTANT_ID_200;
const int _20 = (_7 + 2);
const uint _25 = (_8 % 5u);
const ivec4 _30 = ivec4(20, 30, _20, _20);
const ivec2 _32 = ivec2(_30.y, _30.x);
const int _33 = _30.y;
#ifndef SPIRV_CROSS_CONSTANT_ID_200
#define SPIRV_CROSS_CONSTANT_ID_200 3.141590118408203125
#endif
const float _9 = SPIRV_CROSS_CONSTANT_ID_200;
layout(location = 0) flat out int _4;

View File

@ -1,13 +1,13 @@
#version 450
layout(constant_id = 201) const int _7 = -10;
layout(constant_id = 202) const uint _8 = 100u;
layout(constant_id = 200) const float _9 = 3.141590118408203125;
const int _20 = (_7 + 2);
layout(constant_id = 202) const uint _8 = 100u;
const uint _25 = (_8 % 5u);
const ivec4 _30 = ivec4(20, 30, _20, _20);
const ivec2 _32 = ivec2(_30.y, _30.x);
const int _33 = _30.y;
layout(constant_id = 200) const float _9 = 3.141590118408203125;
layout(location = 0) flat out int _4;

View File

@ -1,17 +1,17 @@
#version 310 es
layout(local_size_x = 2, local_size_y = 1, local_size_z = 1) in;
#ifndef SPIRV_CROSS_CONSTANT_ID_0
#define SPIRV_CROSS_CONSTANT_ID_0 4.0
#endif
const float X = SPIRV_CROSS_CONSTANT_ID_0;
struct Data
{
float a;
float b;
};
#ifndef SPIRV_CROSS_CONSTANT_ID_0
#define SPIRV_CROSS_CONSTANT_ID_0 4.0
#endif
const float X = SPIRV_CROSS_CONSTANT_ID_0;
layout(binding = 0, std430) buffer SSBO
{
Data outdata[];

View File

@ -2,7 +2,6 @@
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
const vec4 _66[2] = vec4[](vec4(10.0), vec4(30.0));
const float _94[2][3] = float[][](float[](1.0, 1.0, 1.0), float[](2.0, 2.0, 2.0));
struct Composite
{
@ -10,6 +9,8 @@ struct Composite
vec4 b[2];
};
const float _94[2][3] = float[][](float[](1.0, 1.0, 1.0), float[](2.0, 2.0, 2.0));
layout(binding = 0, std430) buffer SSBO0
{
vec4 as[];

View File

@ -2,15 +2,16 @@
precision mediump float;
precision highp int;
const vec4 _37[3] = vec4[](vec4(1.0), vec4(2.0), vec4(3.0));
const vec4 _55[2][2] = vec4[][](vec4[](vec4(1.0), vec4(2.0)), vec4[](vec4(8.0), vec4(10.0)));
struct Foobar
{
float a;
float b;
};
const vec4 _37[3] = vec4[](vec4(1.0), vec4(2.0), vec4(3.0));
const vec4 _55[2][2] = vec4[][](vec4[](vec4(1.0), vec4(2.0)), vec4[](vec4(8.0), vec4(10.0)));
const Foobar _75[2] = Foobar[](Foobar(10.0, 40.0), Foobar(90.0, 70.0));
layout(location = 0) out vec4 FragColor;
layout(location = 0) flat in mediump int index;
@ -22,8 +23,7 @@ vec4 resolve(Foobar f)
void main()
{
Foobar param = Foobar(10.0, 20.0);
Foobar indexable[2] = Foobar[](Foobar(10.0, 40.0), Foobar(90.0, 70.0));
Foobar param_1 = indexable[index];
Foobar param_1 = _75[index];
FragColor = ((_37[index] + _55[index][index + 1]) + resolve(param)) + resolve(param_1);
}

View File

@ -9,15 +9,6 @@ const int a = SPIRV_CROSS_CONSTANT_ID_0;
#define SPIRV_CROSS_CONSTANT_ID_1 200
#endif
const int b = SPIRV_CROSS_CONSTANT_ID_1;
#ifndef SPIRV_CROSS_CONSTANT_ID_2
#define SPIRV_CROSS_CONSTANT_ID_2 300
#endif
const int c = SPIRV_CROSS_CONSTANT_ID_2;
const int _18 = (c + 50);
#ifndef SPIRV_CROSS_CONSTANT_ID_3
#define SPIRV_CROSS_CONSTANT_ID_3 400
#endif
const int e = SPIRV_CROSS_CONSTANT_ID_3;
struct A
{
@ -31,6 +22,16 @@ struct B
int member1[a];
};
#ifndef SPIRV_CROSS_CONSTANT_ID_2
#define SPIRV_CROSS_CONSTANT_ID_2 300
#endif
const int c = SPIRV_CROSS_CONSTANT_ID_2;
const int _18 = (c + 50);
#ifndef SPIRV_CROSS_CONSTANT_ID_3
#define SPIRV_CROSS_CONSTANT_ID_3 400
#endif
const int e = SPIRV_CROSS_CONSTANT_ID_3;
layout(binding = 0, std430) buffer SSBO
{
A member_a;

View File

@ -3,9 +3,6 @@ layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
layout(constant_id = 0) const int a = 100;
layout(constant_id = 1) const int b = 200;
layout(constant_id = 2) const int c = 300;
const int _18 = (c + 50);
layout(constant_id = 3) const int e = 400;
struct A
{
@ -19,6 +16,10 @@ struct B
int member1[a];
};
layout(constant_id = 2) const int c = 300;
const int _18 = (c + 50);
layout(constant_id = 3) const int e = 400;
layout(set = 1, binding = 0, std430) buffer SSBO
{
A member_a;

View File

@ -0,0 +1,54 @@
; SPIR-V
; Version: 1.0
; Generator: Khronos Glslang Reference Front End; 7
; Bound: 24
; Schema: 0
OpCapability Shader
%1 = OpExtInstImport "GLSL.std.450"
OpMemoryModel Logical GLSL450
OpEntryPoint Fragment %main "main" %FragColor
OpExecutionMode %main OriginUpperLeft
OpSource GLSL 450
OpName %main "main"
OpName %FragColor "FragColor"
OpName %80 "Foo"
OpMemberName %80 0 "a"
OpName %79 "Bar"
OpMemberName %79 0 "foo"
OpMemberName %79 1 "foo2"
OpName %UBO "UBO"
OpMemberName %UBO 0 "bar"
OpName %_ ""
OpDecorate %FragColor Location 0
OpMemberDecorate %80 0 Offset 0
OpMemberDecorate %79 0 Offset 0
OpMemberDecorate %79 1 Offset 16
OpMemberDecorate %UBO 0 Offset 0
OpDecorate %UBO Block
OpDecorate %_ DescriptorSet 0
OpDecorate %_ Binding 0
%void = OpTypeVoid
%3 = OpTypeFunction %void
%float = OpTypeFloat 32
%v4float = OpTypeVector %float 4
%_ptr_Output_v4float = OpTypePointer Output %v4float
%FragColor = OpVariable %_ptr_Output_v4float Output
%80 = OpTypeStruct %v4float
%79 = OpTypeStruct %80 %80
%UBO = OpTypeStruct %79
%_ptr_Uniform_UBO = OpTypePointer Uniform %UBO
%_ = OpVariable %_ptr_Uniform_UBO Uniform
%int = OpTypeInt 32 1
%int_0 = OpConstant %int 0
%_ptr_Uniform_v4float = OpTypePointer Uniform %v4float
%int_1 = OpConstant %int 1
%main = OpFunction %void None %3
%5 = OpLabel
%18 = OpAccessChain %_ptr_Uniform_v4float %_ %int_0 %int_0 %int_0
%19 = OpLoad %v4float %18
%21 = OpAccessChain %_ptr_Uniform_v4float %_ %int_0 %int_1 %int_0
%22 = OpLoad %v4float %21
%23 = OpFAdd %v4float %19 %22
OpStore %FragColor %23
OpReturn
OpFunctionEnd

View File

@ -27,11 +27,6 @@ CFG::CFG(Compiler &compiler_, const SPIRFunction &func_)
: compiler(compiler_)
, func(func_)
{
preceding_edges.resize(compiler.get_current_id_bound());
succeeding_edges.resize(compiler.get_current_id_bound());
visit_order.resize(compiler.get_current_id_bound());
immediate_dominators.resize(compiler.get_current_id_bound());
build_post_order_visit_order();
build_immediate_dominators();
}
@ -40,10 +35,10 @@ uint32_t CFG::find_common_dominator(uint32_t a, uint32_t b) const
{
while (a != b)
{
if (visit_order[a] < visit_order[b])
a = immediate_dominators[a];
if (get_visit_order(a) < get_visit_order(b))
a = get_immediate_dominator(a);
else
b = immediate_dominators[b];
b = get_immediate_dominator(b);
}
return a;
}
@ -51,7 +46,7 @@ uint32_t CFG::find_common_dominator(uint32_t a, uint32_t b) const
void CFG::build_immediate_dominators()
{
// Traverse the post-order in reverse and build up the immediate dominator tree.
fill(begin(immediate_dominators), end(immediate_dominators), 0);
immediate_dominators.clear();
immediate_dominators[func.entry_block] = func.entry_block;
for (auto i = post_order.size(); i; i--)
@ -78,7 +73,9 @@ bool CFG::is_back_edge(uint32_t to) const
{
// We have a back edge if the visit order is set with the temporary magic value 0.
// Crossing edges will have already been recorded with a visit order.
return visit_order[to] == 0;
auto itr = visit_order.find(to);
assert(itr != end(visit_order));
return itr->second.get() == 0;
}
bool CFG::post_order_visit(uint32_t block_id)
@ -86,11 +83,11 @@ bool CFG::post_order_visit(uint32_t block_id)
// If we have already branched to this block (back edge), stop recursion.
// If our branches are back-edges, we do not record them.
// We have to record crossing edges however.
if (visit_order[block_id] >= 0)
if (visit_order[block_id].get() >= 0)
return !is_back_edge(block_id);
// Block back-edges from recursively revisiting ourselves.
visit_order[block_id] = 0;
visit_order[block_id].get() = 0;
// First visit our branch targets.
auto &block = compiler.get<SPIRBlock>(block_id);
@ -130,7 +127,7 @@ bool CFG::post_order_visit(uint32_t block_id)
add_branch(block_id, block.merge_block);
// Then visit ourselves. Start counting at one, to let 0 be a magic value for testing back vs. crossing edges.
visit_order[block_id] = ++visit_count;
visit_order[block_id].get() = ++visit_count;
post_order.push_back(block_id);
return true;
}
@ -139,7 +136,7 @@ void CFG::build_post_order_visit_order()
{
uint32_t block = func.entry_block;
visit_count = 0;
fill(begin(visit_order), end(visit_order), -1);
visit_order.clear();
post_order.clear();
post_order_visit(block);
}

View File

@ -45,12 +45,18 @@ public:
uint32_t get_immediate_dominator(uint32_t block) const
{
return immediate_dominators[block];
auto itr = immediate_dominators.find(block);
if (itr != std::end(immediate_dominators))
return itr->second;
else
return 0;
}
uint32_t get_visit_order(uint32_t block) const
{
int v = visit_order[block];
auto itr = visit_order.find(block);
assert(itr != std::end(visit_order));
int v = itr->second.get();
assert(v > 0);
return uint32_t(v);
}
@ -59,12 +65,20 @@ public:
const std::vector<uint32_t> &get_preceding_edges(uint32_t block) const
{
return preceding_edges[block];
auto itr = preceding_edges.find(block);
if (itr != std::end(preceding_edges))
return itr->second;
else
return empty_vector;
}
const std::vector<uint32_t> &get_succeeding_edges(uint32_t block) const
{
return succeeding_edges[block];
auto itr = succeeding_edges.find(block);
if (itr != std::end(succeeding_edges))
return itr->second;
else
return empty_vector;
}
template <typename Op>
@ -75,18 +89,34 @@ public:
seen_blocks.insert(block);
op(block);
for (auto b : succeeding_edges[block])
for (auto b : get_succeeding_edges(block))
walk_from(seen_blocks, b, op);
}
private:
struct VisitOrder
{
int &get()
{
return v;
}
const int &get() const
{
return v;
}
int v = -1;
};
Compiler &compiler;
const SPIRFunction &func;
std::vector<std::vector<uint32_t>> preceding_edges;
std::vector<std::vector<uint32_t>> succeeding_edges;
std::vector<uint32_t> immediate_dominators;
std::vector<int> visit_order;
std::unordered_map<uint32_t, std::vector<uint32_t>> preceding_edges;
std::unordered_map<uint32_t, std::vector<uint32_t>> succeeding_edges;
std::unordered_map<uint32_t, uint32_t> immediate_dominators;
std::unordered_map<uint32_t, VisitOrder> visit_order;
std::vector<uint32_t> post_order;
std::vector<uint32_t> empty_vector;
void add_branch(uint32_t from, uint32_t to);
void build_post_order_visit_order();
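One detail of the new spirv_cfg.hpp worth calling out: visit_order is an unordered_map whose mapped type is the small VisitOrder wrapper defaulting to -1 rather than a plain int. unordered_map::operator[] value-initializes missing entries, and for a plain int that would be 0, which collides with the magic value 0 the traversal uses to mark a block whose visit is in progress. The wrapper preserves -1 as the "never visited" state. A self-contained example of the distinction (illustrative only, not part of the patch):

```cpp
#include <cassert>
#include <cstdint>
#include <unordered_map>

struct VisitOrder
{
    int v = -1; // default: "never visited", distinct from 0 ("visit in progress")
};

int main()
{
    std::unordered_map<uint32_t, int> plain;
    std::unordered_map<uint32_t, VisitOrder> wrapped;

    // operator[] default-constructs entries that are not present yet:
    assert(plain[42] == 0);      // 0 would look like an in-progress block
    assert(wrapped[42].v == -1); // the wrapper keeps the "unvisited" sentinel intact
}
```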

View File

@ -322,14 +322,14 @@ enum Types
TypeConstant,
TypeFunction,
TypeFunctionPrototype,
TypePointer,
TypeBlock,
TypeExtension,
TypeExpression,
TypeConstantOp,
TypeCombinedImageSampler,
TypeAccessChain,
TypeUndef
TypeUndef,
TypeCount
};
struct SPIRUndef : IVariant
@ -1250,7 +1250,7 @@ public:
return *this;
}
void set(std::unique_ptr<IVariant> val, uint32_t new_type)
void set(std::unique_ptr<IVariant> val, Types new_type)
{
holder = std::move(val);
if (!allow_type_rewrite && type != TypeNone && type != new_type)
@ -1264,7 +1264,7 @@ public:
{
if (!holder)
SPIRV_CROSS_THROW("nullptr");
if (T::type != type)
if (static_cast<Types>(T::type) != type)
SPIRV_CROSS_THROW("Bad cast");
return *static_cast<T *>(holder.get());
}
@ -1274,12 +1274,12 @@ public:
{
if (!holder)
SPIRV_CROSS_THROW("nullptr");
if (T::type != type)
if (static_cast<Types>(T::type) != type)
SPIRV_CROSS_THROW("Bad cast");
return *static_cast<const T *>(holder.get());
}
uint32_t get_type() const
Types get_type() const
{
return type;
}
@ -1307,7 +1307,7 @@ public:
private:
std::unique_ptr<IVariant> holder;
uint32_t type = TypeNone;
Types type = TypeNone;
bool allow_type_rewrite = false;
};
@ -1328,7 +1328,7 @@ T &variant_set(Variant &var, P &&... args)
{
auto uptr = std::unique_ptr<T>(new T(std::forward<P>(args)...));
auto ptr = uptr.get();
var.set(std::move(uptr), T::type);
var.set(std::move(uptr), static_cast<Types>(T::type));
return *ptr;
}
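The spirv_common.hpp change above tightens Variant to carry a strongly typed Types tag and adds a TypeCount terminator to the enum. TypeCount is what allows the IR to keep one ID list per variant type; the spirv_cross.cpp diff that follows relies on that via ir.ids_for_type[TypeType] and ir.for_each_typed_id<SPIRType>(...), replacing full scans over every ID up to the bound. Below is a rough sketch of how such a typed traversal can be layered on TypeCount; the member names mirror the diff, but the implementation is an assumption made for illustration, not the actual spirv_cross_parsed_ir code:

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Assumes a Types enum that ends in TypeCount, as in the spirv_common.hpp diff.
enum Types { TypeNone, TypeType, TypeVariable, TypeConstant, TypeCount };

// Stand-in for one IR class; the real ones (SPIRType, SPIRVariable, ...)
// each expose a static `type` tag, which the diff now casts to Types.
struct SPIRVariableSketch
{
    static const Types type = TypeVariable;
};

struct ParsedIRSketch
{
    // One bucket of IDs per variant type, indexed by the Types enum.
    std::vector<uint32_t> ids_for_type[TypeCount];

    // Visit only the IDs whose variant type matches T, instead of scanning
    // every ID up to the ID bound and filtering on get_type().
    template <typename T, typename Op>
    void for_each_typed_id(Op &&op) const
    {
        for (uint32_t id : ids_for_type[T::type])
            op(id); // the real helper also passes a T& looked up from the ID
    }
};

int main()
{
    ParsedIRSketch ir;
    ir.ids_for_type[TypeVariable] = { 5, 9, 17 };
    ir.for_each_typed_id<SPIRVariableSketch>([](uint32_t id) { std::printf("variable %u\n", id); });
}
```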

View File

@ -158,7 +158,7 @@ bool Compiler::block_is_pure(const SPIRBlock &block)
string Compiler::to_name(uint32_t id, bool allow_alias) const
{
if (allow_alias && ir.ids.at(id).get_type() == TypeType)
if (allow_alias && ir.ids[id].get_type() == TypeType)
{
// If this type is a simple alias, emit the
// name of the original type instead.
@ -174,10 +174,11 @@ string Compiler::to_name(uint32_t id, bool allow_alias) const
}
}
if (ir.meta[id].decoration.alias.empty())
auto &alias = ir.get_name(id);
if (alias.empty())
return join("_", id);
else
return ir.meta[id].decoration.alias;
return alias;
}
bool Compiler::function_is_pure(const SPIRFunction &func)
@ -473,8 +474,11 @@ bool Compiler::is_hidden_variable(const SPIRVariable &var, bool include_builtins
bool Compiler::is_builtin_type(const SPIRType &type) const
{
auto *type_meta = ir.find_meta(type.self);
// We can have builtin structs as well. If one member of a struct is builtin, the struct must also be builtin.
for (auto &m : ir.meta[type.self].members)
if (type_meta)
for (auto &m : type_meta->members)
if (m.builtin)
return true;
@ -483,7 +487,9 @@ bool Compiler::is_builtin_type(const SPIRType &type) const
bool Compiler::is_builtin_variable(const SPIRVariable &var) const
{
if (var.compat_builtin || ir.meta[var.self].decoration.builtin)
auto *m = ir.find_meta(var.self);
if (var.compat_builtin || (m && m->decoration.builtin))
return true;
else
return is_builtin_type(get<SPIRType>(var.basetype));
@ -491,13 +497,18 @@ bool Compiler::is_builtin_variable(const SPIRVariable &var) const
bool Compiler::is_member_builtin(const SPIRType &type, uint32_t index, BuiltIn *builtin) const
{
auto &memb = ir.meta[type.self].members;
auto *type_meta = ir.find_meta(type.self);
if (type_meta)
{
auto &memb = type_meta->members;
if (index < memb.size() && memb[index].builtin)
{
if (builtin)
*builtin = memb[index].builtin_type;
return true;
}
}
return false;
}
@ -707,55 +718,52 @@ ShaderResources Compiler::get_shader_resources(const unordered_set<uint32_t> *ac
{
ShaderResources res;
for (auto &id : ir.ids)
{
if (id.get_type() != TypeVariable)
continue;
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, const SPIRVariable &var) {
auto &type = this->get<SPIRType>(var.basetype);
// It is possible for uniform storage classes to be passed as function parameters, so detect
// that. To detect function parameters, check of StorageClass of variable is function scope.
if (var.storage == StorageClassFunction || !type.pointer || is_builtin_variable(var))
continue;
return;
if (active_variables && active_variables->find(var.self) == end(*active_variables))
continue;
return;
// Input
if (var.storage == StorageClassInput && interface_variable_exists_in_entry_point(var.self))
{
if (ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock))
if (has_decoration(type.self, DecorationBlock))
{
res.stage_inputs.push_back(
{var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self)});
}
else
res.stage_inputs.push_back({ var.self, var.basetype, type.self, ir.meta[var.self].decoration.alias });
res.stage_inputs.push_back({ var.self, var.basetype, type.self, get_name(var.self) });
}
// Subpass inputs
else if (var.storage == StorageClassUniformConstant && type.image.dim == DimSubpassData)
{
res.subpass_inputs.push_back({ var.self, var.basetype, type.self, ir.meta[var.self].decoration.alias });
res.subpass_inputs.push_back({ var.self, var.basetype, type.self, get_name(var.self) });
}
// Outputs
else if (var.storage == StorageClassOutput && interface_variable_exists_in_entry_point(var.self))
{
if (ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock))
if (has_decoration(type.self, DecorationBlock))
{
res.stage_outputs.push_back(
{var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self)});
}
else
res.stage_outputs.push_back({ var.self, var.basetype, type.self, ir.meta[var.self].decoration.alias });
res.stage_outputs.push_back({ var.self, var.basetype, type.self, get_name(var.self) });
}
// UBOs
else if (type.storage == StorageClassUniform &&
(ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock)))
else if (type.storage == StorageClassUniform && has_decoration(type.self, DecorationBlock))
{
res.uniform_buffers.push_back(
{ var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self) });
}
// Old way to declare SSBOs.
else if (type.storage == StorageClassUniform &&
(ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock)))
else if (type.storage == StorageClassUniform && has_decoration(type.self, DecorationBufferBlock))
{
res.storage_buffers.push_back(
{ var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self) });
@ -772,36 +780,36 @@ ShaderResources Compiler::get_shader_resources(const unordered_set<uint32_t> *ac
// There can only be one push constant block, but keep the vector in case this restriction is lifted
// in the future.
res.push_constant_buffers.push_back(
{ var.self, var.basetype, type.self, ir.meta[var.self].decoration.alias });
{ var.self, var.basetype, type.self, get_name(var.self) });
}
// Images
else if (type.storage == StorageClassUniformConstant && type.basetype == SPIRType::Image &&
type.image.sampled == 2)
{
res.storage_images.push_back({ var.self, var.basetype, type.self, ir.meta[var.self].decoration.alias });
res.storage_images.push_back({ var.self, var.basetype, type.self, get_name(var.self) });
}
// Separate images
else if (type.storage == StorageClassUniformConstant && type.basetype == SPIRType::Image &&
type.image.sampled == 1)
{
res.separate_images.push_back({ var.self, var.basetype, type.self, ir.meta[var.self].decoration.alias });
res.separate_images.push_back({ var.self, var.basetype, type.self, get_name(var.self) });
}
// Separate samplers
else if (type.storage == StorageClassUniformConstant && type.basetype == SPIRType::Sampler)
{
res.separate_samplers.push_back({ var.self, var.basetype, type.self, ir.meta[var.self].decoration.alias });
res.separate_samplers.push_back({ var.self, var.basetype, type.self, get_name(var.self) });
}
// Textures
else if (type.storage == StorageClassUniformConstant && type.basetype == SPIRType::SampledImage)
{
res.sampled_images.push_back({ var.self, var.basetype, type.self, ir.meta[var.self].decoration.alias });
res.sampled_images.push_back({ var.self, var.basetype, type.self, get_name(var.self) });
}
// Atomic counters
else if (type.storage == StorageClassAtomicCounter)
{
res.atomic_counters.push_back({ var.self, var.basetype, type.self, ir.meta[var.self].decoration.alias });
}
res.atomic_counters.push_back({ var.self, var.basetype, type.self, get_name(var.self) });
}
});
return res;
}
@ -829,52 +837,67 @@ void Compiler::fixup_type_alias()
// Due to how some backends work, the "master" type of type_alias must be a block-like type if it exists.
// FIXME: Multiple alias types which are both block-like will be awkward, for now, it's best to just drop the type
// alias if the slave type is a block type.
for (auto &id : ir.ids)
{
if (id.get_type() != TypeType)
continue;
auto &type = id.get<SPIRType>();
ir.for_each_typed_id<SPIRType>([&](uint32_t self, SPIRType &type) {
if (type.type_alias && type_is_block_like(type))
{
// Become the master.
for (auto &other_id : ir.ids)
{
if (other_id.get_type() != TypeType)
continue;
if (other_id.get_id() == type.self)
continue;
ir.for_each_typed_id<SPIRType>([&](uint32_t other_id, SPIRType &other_type) {
if (other_id == type.self)
return;
auto &other_type = other_id.get<SPIRType>();
if (other_type.type_alias == type.type_alias)
other_type.type_alias = type.self;
}
});
get<SPIRType>(type.type_alias).type_alias = id.get_id();
this->get<SPIRType>(type.type_alias).type_alias = self;
type.type_alias = 0;
}
}
});
for (auto &id : ir.ids)
{
if (id.get_type() != TypeType)
continue;
auto &type = id.get<SPIRType>();
ir.for_each_typed_id<SPIRType>([&](uint32_t, SPIRType &type) {
if (type.type_alias && type_is_block_like(type))
{
// This is not allowed, drop the type_alias.
type.type_alias = 0;
}
});
// Reorder declaration of types so that the master of the type alias is always emitted first.
// We need this in case a type B depends on type A (A must come before in the vector), but A is an alias of a type Abuffer, which
// means declaration of A doesn't happen (yet), and order would be B, ABuffer and not ABuffer, B. Fix this up here.
auto &type_ids = ir.ids_for_type[TypeType];
for (auto alias_itr = begin(type_ids); alias_itr != end(type_ids); ++alias_itr)
{
auto &type = get<SPIRType>(*alias_itr);
if (type.type_alias != 0 && !has_decoration(type.type_alias, DecorationCPacked))
{
// We will skip declaring this type, so make sure the type_alias type comes before.
auto master_itr = find(begin(type_ids), end(type_ids), type.type_alias);
assert(master_itr != end(type_ids));
if (alias_itr < master_itr)
{
// Must also swap the type order for the constant-type joined array.
auto &joined_types = ir.ids_for_constant_or_type;
auto alt_alias_itr = find(begin(joined_types), end(joined_types), *alias_itr);
auto alt_master_itr = find(begin(joined_types), end(joined_types), *master_itr);
assert(alt_alias_itr != end(joined_types));
assert(alt_master_itr != end(joined_types));
swap(*alias_itr, *master_itr);
swap(*alt_alias_itr, *alt_master_itr);
}
}
}
}
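// Illustrative sketch (not from the commit above): a self-contained toy showing the
// reorder that fixup_type_alias performs for the "B before ABuffer" case described in the
// comment. ToyType, the IDs and the helper layout are invented for illustration; they are
// not the real SPIRType/ParsedIR API.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

struct ToyType
{
    uint32_t self = 0;
    uint32_t type_alias = 0; // 0 means this type is not an alias of anything.
};

int main()
{
    // Hypothetical IDs: 1 = ABuffer (alias master), 2 = B (alias of ABuffer).
    std::vector<ToyType> types = { { 1, 0 }, { 2, 1 } };
    std::vector<uint32_t> order = { 2, 1 }; // Wrong order: B would be emitted before ABuffer.

    for (auto alias_itr = order.begin(); alias_itr != order.end(); ++alias_itr)
    {
        auto &type = types[*alias_itr - 1];
        if (type.type_alias != 0)
        {
            // Declaration of the alias itself is skipped, so its master must come first.
            auto master_itr = std::find(order.begin(), order.end(), type.type_alias);
            assert(master_itr != order.end());
            if (alias_itr < master_itr)
                std::swap(*alias_itr, *master_itr);
        }
    }

    assert(order.front() == 1); // ABuffer now precedes B.
    return 0;
}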
void Compiler::parse_fixup()
{
// Figure out specialization constants for work group sizes.
for (auto &id : ir.ids)
for (auto id_ : ir.ids_for_constant_or_variable)
{
auto &id = ir.ids[id_];
if (id.get_type() == TypeConstant)
{
auto &c = id.get<SPIRConstant>();
@ -909,7 +932,7 @@ void Compiler::flatten_interface_block(uint32_t id)
{
auto &var = get<SPIRVariable>(id);
auto &type = get<SPIRType>(var.basetype);
auto &flags = ir.meta.at(type.self).decoration.decoration_flags;
auto &flags = ir.meta[type.self].decoration.decoration_flags;
if (!type.array.empty())
SPIRV_CROSS_THROW("Type is array of UBOs.");
@ -932,7 +955,7 @@ void Compiler::flatten_interface_block(uint32_t id)
SPIRV_CROSS_THROW("Member type cannot be struct.");
// Inherit variable name from interface block name.
ir.meta.at(var.self).decoration.alias = ir.meta.at(type.self).decoration.alias;
ir.meta[var.self].decoration.alias = ir.meta[type.self].decoration.alias;
auto storage = var.storage;
if (storage == StorageClassUniform)
@ -1095,19 +1118,17 @@ const std::string &Compiler::get_member_name(uint32_t id, uint32_t index) const
void Compiler::set_member_qualified_name(uint32_t type_id, uint32_t index, const std::string &name)
{
ir.meta.at(type_id).members.resize(max(ir.meta[type_id].members.size(), size_t(index) + 1));
ir.meta.at(type_id).members[index].qualified_alias = name;
ir.meta[type_id].members.resize(max(ir.meta[type_id].members.size(), size_t(index) + 1));
ir.meta[type_id].members[index].qualified_alias = name;
}
const std::string &Compiler::get_member_qualified_name(uint32_t type_id, uint32_t index) const
const string &Compiler::get_member_qualified_name(uint32_t type_id, uint32_t index) const
{
const static string empty;
auto &m = ir.meta.at(type_id);
if (index < m.members.size())
return m.members[index].qualified_alias;
auto *m = ir.find_meta(type_id);
if (m && index < m->members.size())
return m->members[index].qualified_alias;
else
return empty;
return ir.get_empty_string();
}
uint32_t Compiler::get_member_decoration(uint32_t id, uint32_t index, Decoration decoration) const
@ -1152,7 +1173,7 @@ StorageClass Compiler::get_storage_class(uint32_t id) const
const std::string &Compiler::get_name(uint32_t id) const
{
return ir.meta.at(id).decoration.alias;
return ir.get_name(id);
}
const std::string Compiler::get_fallback_name(uint32_t id) const
@ -1206,7 +1227,11 @@ void Compiler::unset_decoration(uint32_t id, Decoration decoration)
bool Compiler::get_binary_offset_for_decoration(uint32_t id, spv::Decoration decoration, uint32_t &word_offset) const
{
auto &word_offsets = ir.meta.at(id).decoration_word_offset;
auto *m = ir.find_meta(id);
if (!m)
return false;
auto &word_offsets = m->decoration_word_offset;
auto itr = word_offsets.find(decoration);
if (itr == end(word_offsets))
return false;
@ -1434,36 +1459,54 @@ bool Compiler::traverse_all_reachable_opcodes(const SPIRFunction &func, OpcodeHa
}
uint32_t Compiler::type_struct_member_offset(const SPIRType &type, uint32_t index) const
{
auto *type_meta = ir.find_meta(type.self);
if (type_meta)
{
// Decoration must be set in valid SPIR-V, otherwise throw.
auto &dec = ir.meta[type.self].members.at(index);
auto &dec = type_meta->members[index];
if (dec.decoration_flags.get(DecorationOffset))
return dec.offset;
else
SPIRV_CROSS_THROW("Struct member does not have Offset set.");
}
else
SPIRV_CROSS_THROW("Struct member does not have Offset set.");
}
uint32_t Compiler::type_struct_member_array_stride(const SPIRType &type, uint32_t index) const
{
auto *type_meta = ir.find_meta(type.member_types[index]);
if (type_meta)
{
// Decoration must be set in valid SPIR-V, otherwise throw.
// ArrayStride is part of the array type not OpMemberDecorate.
auto &dec = ir.meta[type.member_types[index]].decoration;
auto &dec = type_meta->decoration;
if (dec.decoration_flags.get(DecorationArrayStride))
return dec.array_stride;
else
SPIRV_CROSS_THROW("Struct member does not have ArrayStride set.");
}
else
SPIRV_CROSS_THROW("Struct member does not have Offset set.");
}
uint32_t Compiler::type_struct_member_matrix_stride(const SPIRType &type, uint32_t index) const
{
auto *type_meta = ir.find_meta(type.self);
if (type_meta)
{
// Decoration must be set in valid SPIR-V, otherwise throw.
// MatrixStride is part of OpMemberDecorate.
auto &dec = ir.meta[type.self].members[index];
auto &dec = type_meta->members[index];
if (dec.decoration_flags.get(DecorationMatrixStride))
return dec.matrix_stride;
else
SPIRV_CROSS_THROW("Struct member does not have MatrixStride set.");
}
else
SPIRV_CROSS_THROW("Struct member does not have MatrixStride set.");
}
size_t Compiler::get_declared_struct_size(const SPIRType &type) const
{
@ -1557,7 +1600,7 @@ bool Compiler::BufferAccessHandler::handle(Op opcode, const uint32_t *args, uint
bool ptr_chain = (opcode == OpPtrAccessChain);
// Invalid SPIR-V.
if (length < (ptr_chain ? 5 : 4))
if (length < (ptr_chain ? 5u : 4u))
return false;
if (args[2] != id)
@ -2433,16 +2476,11 @@ uint32_t Compiler::build_dummy_sampler_for_combined_images()
void Compiler::build_combined_image_samplers()
{
for (auto &id : ir.ids)
{
if (id.get_type() == TypeFunction)
{
auto &func = id.get<SPIRFunction>();
ir.for_each_typed_id<SPIRFunction>([&](uint32_t, SPIRFunction &func) {
func.combined_parameters.clear();
func.shadow_arguments.clear();
func.do_combined_parameters = true;
}
}
});
combined_image_samplers.clear();
CombinedImageSamplerHandler handler(*this);
@ -2452,15 +2490,10 @@ void Compiler::build_combined_image_samplers()
vector<SpecializationConstant> Compiler::get_specialization_constants() const
{
vector<SpecializationConstant> spec_consts;
for (auto &id : ir.ids)
{
if (id.get_type() == TypeConstant)
{
auto &c = id.get<SPIRConstant>();
ir.for_each_typed_id<SPIRConstant>([&](uint32_t, const SPIRConstant &c) {
if (c.specialization && has_decoration(c.self, DecorationSpecId))
spec_consts.push_back({ c.self, get_decoration(c.self, DecorationSpecId) });
}
}
});
return spec_consts;
}
@ -2892,12 +2925,6 @@ void Compiler::find_function_local_luts(SPIRFunction &entry, const AnalyzeVariab
if (type.array.empty())
continue;
// HACK: Do not consider structs. This is a quirk with how types are currently being emitted.
// Structs are emitted after specialization constants and composite constants.
// FIXME: Fix declaration order so declared constants can have struct types.
if (type.basetype == SPIRType::Struct)
continue;
// If the variable has an initializer, make sure it is a constant expression.
uint32_t static_constant_expression = 0;
if (var.initializer)
@ -3616,15 +3643,18 @@ bool Compiler::CombinedImageSamplerUsageHandler::handle(Op opcode, const uint32_
bool Compiler::buffer_is_hlsl_counter_buffer(uint32_t id) const
{
return ir.meta.at(id).hlsl_is_magic_counter_buffer;
auto *m = ir.find_meta(id);
return m && m->hlsl_is_magic_counter_buffer;
}
bool Compiler::buffer_get_hlsl_counter_buffer(uint32_t id, uint32_t &counter_id) const
{
auto *m = ir.find_meta(id);
// First, check for the proper decoration.
if (ir.meta[id].hlsl_magic_counter_buffer != 0)
if (m && m->hlsl_magic_counter_buffer != 0)
{
counter_id = ir.meta[id].hlsl_magic_counter_buffer;
counter_id = m->hlsl_magic_counter_buffer;
return true;
}
else
@ -3691,8 +3721,10 @@ std::string Compiler::get_remapped_declared_block_name(uint32_t id) const
{
auto &var = get<SPIRVariable>(id);
auto &type = get<SPIRType>(var.basetype);
auto &block_name = ir.meta[type.self].decoration.alias;
return block_name.empty() ? get_block_fallback_name(id) : block_name;
auto *type_meta = ir.find_meta(type.self);
auto *block_name = type_meta ? &type_meta->decoration.alias : nullptr;
return (!block_name || block_name->empty()) ? get_block_fallback_name(id) : *block_name;
}
}
@ -3739,7 +3771,11 @@ bool Compiler::instruction_to_result_type(uint32_t &result_type, uint32_t &resul
Bitset Compiler::combined_decoration_for_member(const SPIRType &type, uint32_t index) const
{
Bitset flags;
auto &memb = ir.meta[type.self].members;
auto *type_meta = ir.find_meta(type.self);
if (type_meta)
{
auto &memb = type_meta->members;
if (index >= memb.size())
return flags;
auto &dec = memb[index];
@ -3748,6 +3784,7 @@ Bitset Compiler::combined_decoration_for_member(const SPIRType &type, uint32_t i
flags.merge_or(dec.decoration_flags);
for (uint32_t i = 0; i < type.member_types.size(); i++)
flags.merge_or(combined_decoration_for_member(get<SPIRType>(type.member_types[i]), i));
}
return flags;
}


@ -561,7 +561,8 @@ protected:
template <typename T, typename... P>
T &set(uint32_t id, P &&... args)
{
auto &var = variant_set<T>(ir.ids.at(id), std::forward<P>(args)...);
ir.add_typed_id(static_cast<Types>(T::type), id);
auto &var = variant_set<T>(ir.ids[id], std::forward<P>(args)...);
var.self = id;
return var;
}
@ -569,13 +570,13 @@ protected:
template <typename T>
T &get(uint32_t id)
{
return variant_get<T>(ir.ids.at(id));
return variant_get<T>(ir.ids[id]);
}
template <typename T>
T *maybe_get(uint32_t id)
{
if (ir.ids.at(id).get_type() == T::type)
if (ir.ids[id].get_type() == static_cast<Types>(T::type))
return &get<T>(id);
else
return nullptr;
@ -584,13 +585,13 @@ protected:
template <typename T>
const T &get(uint32_t id) const
{
return variant_get<T>(ir.ids.at(id));
return variant_get<T>(ir.ids[id]);
}
template <typename T>
const T *maybe_get(uint32_t id) const
{
if (ir.ids.at(id).get_type() == T::type)
if (ir.ids[id].get_type() == static_cast<Types>(T::type))
return &get<T>(id);
else
return nullptr;


@ -16,6 +16,7 @@
#include "spirv_cross_parsed_ir.hpp"
#include <assert.h>
#include <algorithm>
using namespace std;
using namespace spv;
@ -25,7 +26,6 @@ namespace spirv_cross
void ParsedIR::set_id_bounds(uint32_t bounds)
{
ids.resize(bounds);
meta.resize(bounds);
block_meta.resize(bounds);
}
@ -65,19 +65,24 @@ static string ensure_valid_identifier(const string &name, bool member)
const string &ParsedIR::get_name(uint32_t id) const
{
return meta[id].decoration.alias;
auto *m = find_meta(id);
if (m)
return m->decoration.alias;
else
return empty_string;
}
const string &ParsedIR::get_member_name(uint32_t id, uint32_t index) const
{
auto &m = meta[id];
if (index >= m.members.size())
auto *m = find_meta(id);
if (m)
{
static string empty;
return empty;
if (index >= m->members.size())
return empty_string;
return m->members[index].alias;
}
return m.members[index].alias;
else
return empty_string;
}
void ParsedIR::set_name(uint32_t id, const string &name)
@ -273,7 +278,10 @@ Bitset ParsedIR::get_buffer_block_flags(const SPIRVariable &var) const
// Some flags like non-writable, non-readable are actually found
// as member decorations. If all members have a decoration set, propagate
// the decoration up as a regular variable decoration.
Bitset base_flags = meta[var.self].decoration.decoration_flags;
Bitset base_flags;
auto *m = find_meta(var.self);
if (m)
base_flags = m->decoration.decoration_flags;
if (type.member_types.empty())
return base_flags;
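// Illustrative sketch (not from the commit above): a toy version of the propagation
// described in the comment, using plain 64-bit masks instead of Bitset and made-up flag
// values. A member-level flag such as NonWritable is promoted to the variable level only
// when every member carries it.
#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    const uint64_t NonWritable = 1ull << 0;
    const uint64_t NonReadable = 1ull << 1;

    uint64_t base_flags = 0; // decorations already present on the variable itself
    std::vector<uint64_t> member_flags = { NonWritable | NonReadable, NonWritable };

    uint64_t common = ~0ull;
    for (auto flags : member_flags)
        common &= flags;
    base_flags |= common;

    // Only NonWritable is promoted: the second member lacks NonReadable.
    printf("promoted flags: 0x%llx\n", static_cast<unsigned long long>(base_flags));
    return 0;
}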
@ -288,14 +296,15 @@ Bitset ParsedIR::get_buffer_block_flags(const SPIRVariable &var) const
const Bitset &ParsedIR::get_member_decoration_bitset(uint32_t id, uint32_t index) const
{
auto &m = meta[id];
if (index >= m.members.size())
auto *m = find_meta(id);
if (m)
{
static const Bitset cleared = {};
return cleared;
if (index >= m->members.size())
return cleared_bitset;
return m->members[index].decoration_flags;
}
return m.members[index].decoration_flags;
else
return cleared_bitset;
}
bool ParsedIR::has_decoration(uint32_t id, Decoration decoration) const
@ -305,7 +314,11 @@ bool ParsedIR::has_decoration(uint32_t id, Decoration decoration) const
uint32_t ParsedIR::get_decoration(uint32_t id, Decoration decoration) const
{
auto &dec = meta[id].decoration;
auto *m = find_meta(id);
if (!m)
return 0;
auto &dec = m->decoration;
if (!dec.decoration_flags.get(decoration))
return 0;
@ -342,11 +355,14 @@ uint32_t ParsedIR::get_decoration(uint32_t id, Decoration decoration) const
const string &ParsedIR::get_decoration_string(uint32_t id, Decoration decoration) const
{
auto &dec = meta[id].decoration;
static const string empty;
auto *m = find_meta(id);
if (!m)
return empty_string;
auto &dec = m->decoration;
if (!dec.decoration_flags.get(decoration))
return empty;
return empty_string;
switch (decoration)
{
@ -354,7 +370,7 @@ const string &ParsedIR::get_decoration_string(uint32_t id, Decoration decoration
return dec.hlsl_semantic;
default:
return empty;
return empty_string;
}
}
@ -427,11 +443,14 @@ bool ParsedIR::has_member_decoration(uint32_t id, uint32_t index, Decoration dec
uint32_t ParsedIR::get_member_decoration(uint32_t id, uint32_t index, Decoration decoration) const
{
auto &m = meta[id];
if (index >= m.members.size())
auto *m = find_meta(id);
if (!m)
return 0;
auto &dec = m.members[index];
if (index >= m->members.size())
return 0;
auto &dec = m->members[index];
if (!dec.decoration_flags.get(decoration))
return 0;
@ -458,9 +477,15 @@ uint32_t ParsedIR::get_member_decoration(uint32_t id, uint32_t index, Decoration
const Bitset &ParsedIR::get_decoration_bitset(uint32_t id) const
{
auto &dec = meta[id].decoration;
auto *m = find_meta(id);
if (m)
{
auto &dec = m->decoration;
return dec.decoration_flags;
}
else
return cleared_bitset;
}
void ParsedIR::set_member_decoration_string(uint32_t id, uint32_t index, Decoration decoration, const string &argument)
{
@ -481,13 +506,13 @@ void ParsedIR::set_member_decoration_string(uint32_t id, uint32_t index, Decorat
const string &ParsedIR::get_member_decoration_string(uint32_t id, uint32_t index, Decoration decoration) const
{
static const string empty;
auto &m = meta[id];
auto *m = find_meta(id);
if (m)
{
if (!has_member_decoration(id, index, decoration))
return empty;
return empty_string;
auto &dec = m.members[index];
auto &dec = m->members[index];
switch (decoration)
{
@ -495,9 +520,12 @@ const string &ParsedIR::get_member_decoration_string(uint32_t id, uint32_t index
return dec.hlsl_semantic;
default:
return empty;
return empty_string;
}
}
else
return empty_string;
}
void ParsedIR::unset_member_decoration(uint32_t id, uint32_t index, Decoration decoration)
{
@ -544,9 +572,77 @@ uint32_t ParsedIR::increase_bound_by(uint32_t incr_amount)
auto curr_bound = ids.size();
auto new_bound = curr_bound + incr_amount;
ids.resize(new_bound);
meta.resize(new_bound);
block_meta.resize(new_bound);
return uint32_t(curr_bound);
}
void ParsedIR::remove_typed_id(Types type, uint32_t id)
{
auto &type_ids = ids_for_type[type];
type_ids.erase(remove(begin(type_ids), end(type_ids), id), end(type_ids));
}
void ParsedIR::reset_all_of_type(Types type)
{
for (auto &id : ids_for_type[type])
if (ids[id].get_type() == type)
ids[id].reset();
ids_for_type[type].clear();
}
void ParsedIR::add_typed_id(Types type, uint32_t id)
{
if (loop_iteration_depth)
SPIRV_CROSS_THROW("Cannot add typed ID while looping over it.");
switch (type)
{
case TypeConstant:
ids_for_constant_or_variable.push_back(id);
ids_for_constant_or_type.push_back(id);
break;
case TypeVariable:
ids_for_constant_or_variable.push_back(id);
break;
case TypeType:
case TypeConstantOp:
ids_for_constant_or_type.push_back(id);
break;
default:
break;
}
if (ids[id].empty())
{
ids_for_type[type].push_back(id);
}
else if (ids[id].get_type() != type)
{
remove_typed_id(ids[id].get_type(), id);
ids_for_type[type].push_back(id);
}
}
const Meta *ParsedIR::find_meta(uint32_t id) const
{
auto itr = meta.find(id);
if (itr != end(meta))
return &itr->second;
else
return nullptr;
}
Meta *ParsedIR::find_meta(uint32_t id)
{
auto itr = meta.find(id);
if (itr != end(meta))
return &itr->second;
else
return nullptr;
}
} // namespace spirv_cross
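// Illustrative sketch (not from the commit above): a back-of-the-envelope comparison of
// why meta is now looked up through find_meta() on a map keyed by ID instead of being
// resized to the full ID bound in set_id_bounds(). ToyMeta and all sizes here are
// assumptions, not measured numbers: a dense vector pays for every possible ID, while the
// map only pays for IDs that actually carry names or decorations.
#include <cstdint>
#include <cstdio>
#include <unordered_map>

struct ToyMeta
{
    char payload[256]; // stand-in for decoration and name storage
};

int main()
{
    const uint32_t id_bound = 1000000;  // hypothetical, unusually large ID bound
    const uint32_t ids_with_meta = 100; // IDs that actually have metadata

    std::unordered_map<uint32_t, ToyMeta> sparse;
    for (uint32_t i = 0; i < ids_with_meta; i++)
        sparse[i * 1000] = ToyMeta{};

    printf("dense vector: %zu bytes\n", sizeof(ToyMeta) * size_t(id_bound));
    printf("sparse map:   ~%zu bytes of payload\n", sizeof(ToyMeta) * sparse.size());
    return 0;
}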


@ -43,7 +43,19 @@ public:
std::vector<Variant> ids;
// Various meta data for IDs, decorations, names, etc.
std::vector<Meta> meta;
std::unordered_map<uint32_t, Meta> meta;
// Holds all IDs which have a certain type.
// This is needed so we can iterate through a specific kind of resource quickly,
// and in-order of module declaration.
std::vector<uint32_t> ids_for_type[TypeCount];
// Special purpose lists which contain a union of types.
// This is needed so we can declare specialization constants and structs in an interleaved fashion,
// among other things.
// Constants can be of struct type, and struct array sizes can use specialization constants.
std::vector<uint32_t> ids_for_constant_or_type;
std::vector<uint32_t> ids_for_constant_or_variable;
// Declared capabilities and extensions in the SPIR-V module.
// Not really used except for reflection at the moment.
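// Illustrative sketch (not from the commit above): a toy analogue of the per-type ID
// lists declared here. ToyIR, ToyTypes and for_each_variable are invented for
// illustration; the point is that each kind of ID keeps its own list, so a pass over
// "all variables" walks only variable IDs instead of every ID in the module.
#include <cstdint>
#include <cstdio>
#include <vector>

enum ToyTypes
{
    ToyTypeVariable = 0,
    ToyTypeConstant = 1,
    ToyTypeCount = 2
};

struct ToyIR
{
    std::vector<uint32_t> ids_for_type[ToyTypeCount];

    void add_typed_id(ToyTypes type, uint32_t id)
    {
        ids_for_type[type].push_back(id);
    }

    template <typename Op>
    void for_each_variable(const Op &op) const
    {
        for (auto id : ids_for_type[ToyTypeVariable])
            op(id);
    }
};

int main()
{
    ToyIR ir;
    ir.add_typed_id(ToyTypeVariable, 4);
    ir.add_typed_id(ToyTypeConstant, 7);
    ir.add_typed_id(ToyTypeVariable, 9);

    // Visits IDs 4 and 9 in declaration order; the constant ID 7 is never inspected.
    ir.for_each_variable([](uint32_t id) { printf("variable id %u\n", id); });
    return 0;
}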
@ -111,6 +123,47 @@ public:
uint32_t increase_bound_by(uint32_t count);
Bitset get_buffer_block_flags(const SPIRVariable &var) const;
void add_typed_id(Types type, uint32_t id);
void remove_typed_id(Types type, uint32_t id);
template <typename T, typename Op>
void for_each_typed_id(const Op &op)
{
loop_iteration_depth++;
for (auto &id : ids_for_type[T::type])
{
if (ids[id].get_type() == static_cast<Types>(T::type))
op(id, get<T>(id));
}
loop_iteration_depth--;
}
template <typename T, typename Op>
void for_each_typed_id(const Op &op) const
{
for (auto &id : ids_for_type[T::type])
{
if (ids[id].get_type() == static_cast<Types>(T::type))
op(id, get<T>(id));
}
}
template <typename T>
void reset_all_of_type()
{
reset_all_of_type(static_cast<Types>(T::type));
}
void reset_all_of_type(Types type);
Meta *find_meta(uint32_t id);
const Meta *find_meta(uint32_t id) const;
const std::string &get_empty_string() const
{
return empty_string;
}
private:
template <typename T>
T &get(uint32_t id)
@ -123,6 +176,10 @@ private:
{
return variant_get<T>(ids[id]);
}
uint32_t loop_iteration_depth = 0;
std::string empty_string;
Bitset cleared_bitset;
};
} // namespace spirv_cross
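// Illustrative sketch (not from the commit above): a toy reconstruction of the
// loop_iteration_depth guard used by for_each_typed_id and add_typed_id. ToyIR and its
// members are invented for illustration; the idea is that pushing into a typed-ID list
// while a loop is walking that same vector could invalidate its iterators, so the add is
// rejected instead of silently corrupting the traversal.
#include <cstdint>
#include <cstdio>
#include <stdexcept>
#include <vector>

struct ToyIR
{
    std::vector<uint32_t> typed_ids;
    uint32_t loop_iteration_depth = 0;

    void add_typed_id(uint32_t id)
    {
        if (loop_iteration_depth)
            throw std::runtime_error("Cannot add typed ID while looping over it.");
        typed_ids.push_back(id);
    }

    template <typename Op>
    void for_each_typed_id(const Op &op)
    {
        loop_iteration_depth++;
        for (auto id : typed_ids)
            op(id);
        loop_iteration_depth--;
    }
};

int main()
{
    ToyIR ir;
    ir.add_typed_id(1);
    try
    {
        // Adding from inside the loop is caught rather than invoking undefined behaviour.
        ir.for_each_typed_id([&](uint32_t) { ir.add_typed_id(2); });
    }
    catch (const std::exception &e)
    {
        printf("%s\n", e.what());
    }
    return 0;
}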


@ -291,25 +291,17 @@ void CompilerGLSL::reset()
block_names.clear();
function_overloads.clear();
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
// Clear unflushed dependees.
id.get<SPIRVariable>().dependees.clear();
}
else if (id.get_type() == TypeExpression)
{
// And remove all expressions.
id.reset();
}
else if (id.get_type() == TypeFunction)
{
// Reset active state for all functions.
id.get<SPIRFunction>().active = false;
id.get<SPIRFunction>().flush_undeclared = true;
}
}
ir.for_each_typed_id<SPIRFunction>([&](uint32_t, SPIRFunction &func) {
func.active = false;
func.flush_undeclared = true;
});
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
var.dependees.clear();
});
ir.reset_all_of_type<SPIRExpression>();
ir.reset_all_of_type<SPIRAccessChain>();
statement_count = 0;
indent = 0;
@ -344,11 +336,7 @@ void CompilerGLSL::remap_pls_variables()
void CompilerGLSL::find_static_extensions()
{
for (auto &id : ir.ids)
{
if (id.get_type() == TypeType)
{
auto &type = id.get<SPIRType>();
ir.for_each_typed_id<SPIRType>([&](uint32_t, const SPIRType &type) {
if (type.basetype == SPIRType::Double)
{
if (options.es)
@ -373,8 +361,7 @@ void CompilerGLSL::find_static_extensions()
if (type.basetype == SPIRType::Short || type.basetype == SPIRType::UShort)
require_extension_internal("GL_AMD_gpu_shader_int16");
}
}
});
auto &execution = get_entry_point();
switch (execution.model)
@ -978,7 +965,7 @@ uint32_t CompilerGLSL::type_to_packed_alignment(const SPIRType &type, const Bits
uint32_t alignment = 0;
for (uint32_t i = 0; i < type.member_types.size(); i++)
{
auto member_flags = ir.meta[type.self].members.at(i).decoration_flags;
auto member_flags = ir.meta[type.self].members[i].decoration_flags;
alignment =
max(alignment, type_to_packed_alignment(get<SPIRType>(type.member_types[i]), member_flags, packing));
}
@ -1082,7 +1069,7 @@ uint32_t CompilerGLSL::type_to_packed_size(const SPIRType &type, const Bitset &f
for (uint32_t i = 0; i < type.member_types.size(); i++)
{
auto member_flags = ir.meta[type.self].members.at(i).decoration_flags;
auto member_flags = ir.meta[type.self].members[i].decoration_flags;
auto &member_type = get<SPIRType>(type.member_types[i]);
uint32_t packed_alignment = type_to_packed_alignment(member_type, member_flags, packing);
@ -1155,7 +1142,7 @@ bool CompilerGLSL::buffer_is_packing_standard(const SPIRType &type, BufferPackin
for (uint32_t i = 0; i < type.member_types.size(); i++)
{
auto &memb_type = get<SPIRType>(type.member_types[i]);
auto member_flags = ir.meta[type.self].members.at(i).decoration_flags;
auto member_flags = ir.meta[type.self].members[i].decoration_flags;
// Verify alignment rules.
uint32_t packed_alignment = type_to_packed_alignment(memb_type, member_flags, packing);
@ -1902,19 +1889,14 @@ void CompilerGLSL::replace_illegal_names()
};
// clang-format on
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, const SPIRVariable &var) {
if (!is_hidden_variable(var))
{
auto &m = ir.meta[var.self].decoration;
if (m.alias.compare(0, 3, "gl_") == 0 || keywords.find(m.alias) != end(keywords))
m.alias = join("_", m.alias);
}
}
}
});
}
void CompilerGLSL::replace_fragment_output(SPIRVariable &var)
@ -1957,18 +1939,13 @@ void CompilerGLSL::replace_fragment_output(SPIRVariable &var)
void CompilerGLSL::replace_fragment_outputs()
{
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
auto &type = this->get<SPIRType>(var.basetype);
if (!is_builtin_variable(var) && !var.remapped_variable && type.pointer &&
var.storage == StorageClassOutput)
replace_fragment_output(var);
}
}
});
}
string CompilerGLSL::remap_swizzle(const SPIRType &out_type, uint32_t input_components, const string &expr)
@ -2027,12 +2004,7 @@ void CompilerGLSL::emit_pls()
void CompilerGLSL::fixup_image_load_store_access()
{
for (auto &id : ir.ids)
{
if (id.get_type() != TypeVariable)
continue;
uint32_t var = id.get<SPIRVariable>().self;
ir.for_each_typed_id<SPIRVariable>([&](uint32_t var, const SPIRVariable &) {
auto &vartype = expression_type(var);
if (vartype.basetype == SPIRType::Image)
{
@ -2040,14 +2012,14 @@ void CompilerGLSL::fixup_image_load_store_access()
// Solve this by making the image access as restricted as possible and loosen up if we need to.
// If any no-read/no-write flags are actually set, assume that the compiler knows what it's doing.
auto &flags = ir.meta.at(var).decoration.decoration_flags;
auto &flags = ir.meta[var].decoration.decoration_flags;
if (!flags.get(DecorationNonWritable) && !flags.get(DecorationNonReadable))
{
flags.set(DecorationNonWritable);
flags.set(DecorationNonReadable);
}
}
}
});
}
void CompilerGLSL::emit_declared_builtin_block(StorageClass storage, ExecutionModel model)
@ -2063,13 +2035,8 @@ void CompilerGLSL::emit_declared_builtin_block(StorageClass storage, ExecutionMo
uint32_t cull_distance_size = 0;
uint32_t clip_distance_size = 0;
for (auto &id : ir.ids)
{
if (id.get_type() != TypeVariable)
continue;
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
auto &type = this->get<SPIRType>(var.basetype);
bool block = has_decoration(type.self, DecorationBlock);
Bitset builtins;
@ -2082,9 +2049,9 @@ void CompilerGLSL::emit_declared_builtin_block(StorageClass storage, ExecutionMo
{
builtins.set(m.builtin_type);
if (m.builtin_type == BuiltInCullDistance)
cull_distance_size = get<SPIRType>(type.member_types[index]).array.front();
cull_distance_size = this->get<SPIRType>(type.member_types[index]).array.front();
else if (m.builtin_type == BuiltInClipDistance)
clip_distance_size = get<SPIRType>(type.member_types[index]).array.front();
clip_distance_size = this->get<SPIRType>(type.member_types[index]).array.front();
}
index++;
}
@ -2104,7 +2071,7 @@ void CompilerGLSL::emit_declared_builtin_block(StorageClass storage, ExecutionMo
}
if (builtins.empty())
continue;
return;
if (emitted_block)
SPIRV_CROSS_THROW("Cannot use more than one builtin I/O block.");
@ -2113,7 +2080,7 @@ void CompilerGLSL::emit_declared_builtin_block(StorageClass storage, ExecutionMo
emitted_block = true;
builtin_array = !type.array.empty();
block_var = &var;
}
});
global_builtins =
Bitset(global_builtins.get_lower() & ((1ull << BuiltInPosition) | (1ull << BuiltInPointSize) |
@ -2164,15 +2131,10 @@ void CompilerGLSL::emit_declared_builtin_block(StorageClass storage, ExecutionMo
void CompilerGLSL::declare_undefined_values()
{
bool emitted = false;
for (auto &id : ir.ids)
{
if (id.get_type() != TypeUndef)
continue;
auto &undef = id.get<SPIRUndef>();
statement(variable_decl(get<SPIRType>(undef.basetype), to_name(undef.self), undef.self), ";");
ir.for_each_typed_id<SPIRUndef>([&](uint32_t, const SPIRUndef &undef) {
statement(variable_decl(this->get<SPIRType>(undef.basetype), to_name(undef.self), undef.self), ";");
emitted = true;
}
});
if (emitted)
statement("");
@ -2238,10 +2200,10 @@ void CompilerGLSL::emit_resources()
// emit specialization constants as actual floats,
// spec op expressions will redirect to the constant name.
//
// TODO: If we have the fringe case that we create a spec constant which depends on a struct type,
// we'll have to deal with that, but there's currently no known way to express that.
for (auto &id : ir.ids)
for (auto &id_ : ir.ids_for_constant_or_type)
{
auto &id = ir.ids[id_];
if (id.get_type() == TypeConstant)
{
auto &c = id.get<SPIRConstant>();
@ -2264,6 +2226,20 @@ void CompilerGLSL::emit_resources()
emit_specialization_constant_op(id.get<SPIRConstantOp>());
emitted = true;
}
else if (id.get_type() == TypeType)
{
auto &type = id.get<SPIRType>();
if (type.basetype == SPIRType::Struct && type.array.empty() && !type.pointer &&
(!ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) &&
!ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock)))
{
if (emitted)
statement("");
emitted = false;
emit_struct(type);
}
}
}
if (emitted)
@ -2289,29 +2265,9 @@ void CompilerGLSL::emit_resources()
emitted = false;
// Output all basic struct types which are not Block or BufferBlock as these are declared inplace
// when such variables are instantiated.
for (auto &id : ir.ids)
{
if (id.get_type() == TypeType)
{
auto &type = id.get<SPIRType>();
if (type.basetype == SPIRType::Struct && type.array.empty() && !type.pointer &&
(!ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) &&
!ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock)))
{
emit_struct(type);
}
}
}
// Output UBOs and SSBOs
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
auto &type = this->get<SPIRType>(var.basetype);
bool is_block_storage = type.storage == StorageClassStorageBuffer || type.storage == StorageClassUniform;
bool has_block_flags = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) ||
@ -2322,33 +2278,23 @@ void CompilerGLSL::emit_resources()
{
emit_buffer_block(var);
}
}
}
});
// Output push constant blocks
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
auto &type = this->get<SPIRType>(var.basetype);
if (var.storage != StorageClassFunction && type.pointer && type.storage == StorageClassPushConstant &&
!is_hidden_variable(var))
{
emit_push_constant_block(var);
}
}
}
});
bool skip_separate_image_sampler = !combined_image_samplers.empty() || !options.vulkan_semantics;
// Output Uniform Constants (values, samplers, images, etc).
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
auto &type = this->get<SPIRType>(var.basetype);
// If we're remapping separate samplers and images, only emit the combined samplers.
if (skip_separate_image_sampler)
@ -2358,7 +2304,7 @@ void CompilerGLSL::emit_resources()
bool separate_image = type.basetype == SPIRType::Image && type.image.sampled == 1;
bool separate_sampler = type.basetype == SPIRType::Sampler;
if (!sampler_buffer && (separate_image || separate_sampler))
continue;
return;
}
if (var.storage != StorageClassFunction && type.pointer &&
@ -2368,20 +2314,15 @@ void CompilerGLSL::emit_resources()
emit_uniform(var);
emitted = true;
}
}
}
});
if (emitted)
statement("");
emitted = false;
// Output in/out interfaces.
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
auto &type = this->get<SPIRType>(var.basetype);
if (var.storage != StorageClassFunction && type.pointer &&
(var.storage == StorageClassInput || var.storage == StorageClassOutput) &&
@ -2401,8 +2342,7 @@ void CompilerGLSL::emit_resources()
emitted = true;
}
}
}
}
});
// Global variables.
for (auto global : global_variables)
@ -2526,7 +2466,7 @@ string CompilerGLSL::dereference_expression(const std::string &expr)
// If this expression starts with an address-of operator ('&'), then
// just return the part after the operator.
// TODO: Strip parens if unnecessary?
if (expr.at(0) == '&')
if (expr.front() == '&')
return expr.substr(1);
else
return join('*', expr);
@ -2537,7 +2477,7 @@ string CompilerGLSL::address_of_expression(const std::string &expr)
// If this expression starts with a dereference operator ('*'), then
// just return the part after the operator.
// TODO: Strip parens if unnecessary?
if (expr.at(0) == '*')
if (expr.front() == '*')
return expr.substr(1);
else
return join('&', expr);
@ -8135,7 +8075,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
auto *var = maybe_get_backing_variable(ops[2]);
if (var)
{
auto &flags = ir.meta.at(var->self).decoration.decoration_flags;
auto &flags = ir.meta[var->self].decoration.decoration_flags;
if (flags.get(DecorationNonReadable))
{
flags.clear(DecorationNonReadable);
@ -8283,7 +8223,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
auto *var = maybe_get_backing_variable(ops[0]);
if (var)
{
auto &flags = ir.meta.at(var->self).decoration.decoration_flags;
auto &flags = ir.meta[var->self].decoration.decoration_flags;
if (flags.get(DecorationNonWritable))
{
flags.clear(DecorationNonWritable);
@ -9473,7 +9413,7 @@ void CompilerGLSL::flatten_buffer_block(uint32_t id)
auto &var = get<SPIRVariable>(id);
auto &type = get<SPIRType>(var.basetype);
auto name = to_name(type.self, false);
auto &flags = ir.meta.at(type.self).decoration.decoration_flags;
auto &flags = ir.meta[type.self].decoration.decoration_flags;
if (!type.array.empty())
SPIRV_CROSS_THROW(name + " is an array of UBOs.");
@ -9498,7 +9438,7 @@ bool CompilerGLSL::check_atomic_image(uint32_t id)
auto *var = maybe_get_backing_variable(id);
if (var)
{
auto &flags = ir.meta.at(var->self).decoration.decoration_flags;
auto &flags = ir.meta[var->self].decoration.decoration_flags;
if (flags.get(DecorationNonWritable) || flags.get(DecorationNonReadable))
{
flags.clear(DecorationNonWritable);
@ -10607,7 +10547,7 @@ void CompilerGLSL::emit_block_chain(SPIRBlock &block)
{
// If we cannot return arrays, we will have a special out argument we can write to instead.
// The backend is responsible for setting this up, and redirection the return values as appropriate.
if (ir.ids.at(block.return_value).get_type() != TypeUndef)
if (ir.ids[block.return_value].get_type() != TypeUndef)
emit_array_copy("SPIRV_Cross_return_value", block.return_value);
if (!block_is_outside_flow_control_from_block(get<SPIRBlock>(current_function->entry_block), block) ||
@ -10619,7 +10559,7 @@ void CompilerGLSL::emit_block_chain(SPIRBlock &block)
else
{
// OpReturnValue can return Undef, so don't emit anything for this case.
if (ir.ids.at(block.return_value).get_type() != TypeUndef)
if (ir.ids[block.return_value].get_type() != TypeUndef)
statement("return ", to_expression(block.return_value), ";");
}
}


@ -1006,36 +1006,33 @@ void CompilerHLSL::emit_composite_constants()
// global constants directly.
bool emitted = false;
for (auto &id : ir.ids)
{
if (id.get_type() == TypeConstant)
{
auto &c = id.get<SPIRConstant>();
ir.for_each_typed_id<SPIRConstant>([&](uint32_t, SPIRConstant &c) {
if (c.specialization)
continue;
return;
auto &type = get<SPIRType>(c.constant_type);
auto &type = this->get<SPIRType>(c.constant_type);
if (type.basetype == SPIRType::Struct || !type.array.empty())
{
auto name = to_name(c.self);
statement("static const ", variable_decl(type, name), " = ", constant_expression(c), ";");
emitted = true;
}
}
}
});
if (emitted)
statement("");
}
void CompilerHLSL::emit_specialization_constants()
void CompilerHLSL::emit_specialization_constants_and_structs()
{
bool emitted = false;
SpecializationConstant wg_x, wg_y, wg_z;
uint32_t workgroup_size_id = get_work_group_size_specialization_constants(wg_x, wg_y, wg_z);
for (auto &id : ir.ids)
for (auto &id_ : ir.ids_for_constant_or_type)
{
auto &id = ir.ids[id_];
if (id.get_type() == TypeConstant)
{
auto &c = id.get<SPIRConstant>();
@ -1070,6 +1067,20 @@ void CompilerHLSL::emit_specialization_constants()
statement("static const ", variable_decl(type, name), " = ", constant_op_expression(c), ";");
emitted = true;
}
else if (id.get_type() == TypeType)
{
auto &type = id.get<SPIRType>();
if (type.basetype == SPIRType::Struct && type.array.empty() && !type.pointer &&
(!ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) &&
!ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock)))
{
if (emitted)
statement("");
emitted = false;
emit_struct(type);
}
}
}
if (emitted)
@ -1083,19 +1094,14 @@ void CompilerHLSL::replace_illegal_names()
"line", "linear", "matrix", "point", "row_major", "sampler",
};
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
if (!is_hidden_variable(var))
{
auto &m = ir.meta[var.self].decoration;
if (keywords.find(m.alias) != end(keywords))
m.alias = join("_", m.alias);
}
}
}
});
CompilerGLSL::replace_illegal_names();
}
@ -1106,35 +1112,14 @@ void CompilerHLSL::emit_resources()
replace_illegal_names();
emit_specialization_constants();
// Output all basic struct types which are not Block or BufferBlock as these are declared inplace
// when such variables are instantiated.
for (auto &id : ir.ids)
{
if (id.get_type() == TypeType)
{
auto &type = id.get<SPIRType>();
if (type.basetype == SPIRType::Struct && type.array.empty() && !type.pointer &&
(!ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) &&
!ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock)))
{
emit_struct(type);
}
}
}
emit_specialization_constants_and_structs();
emit_composite_constants();
bool emitted = false;
// Output UBOs and SSBOs
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
auto &type = this->get<SPIRType>(var.basetype);
bool is_block_storage = type.storage == StorageClassStorageBuffer || type.storage == StorageClassUniform;
bool has_block_flags = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) ||
@ -1146,24 +1131,18 @@ void CompilerHLSL::emit_resources()
emit_buffer_block(var);
emitted = true;
}
}
}
});
// Output push constant blocks
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
auto &type = this->get<SPIRType>(var.basetype);
if (var.storage != StorageClassFunction && type.pointer && type.storage == StorageClassPushConstant &&
!is_hidden_variable(var))
{
emit_push_constant_block(var);
emitted = true;
}
}
}
});
if (execution.model == ExecutionModelVertex && hlsl_options.shader_model <= 30)
{
@ -1174,12 +1153,8 @@ void CompilerHLSL::emit_resources()
bool skip_separate_image_sampler = !combined_image_samplers.empty() || hlsl_options.shader_model <= 30;
// Output Uniform Constants (values, samplers, images, etc).
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
auto &type = this->get<SPIRType>(var.basetype);
// If we're remapping separate samplers and images, only emit the combined samplers.
if (skip_separate_image_sampler)
@ -1189,7 +1164,7 @@ void CompilerHLSL::emit_resources()
bool separate_image = type.basetype == SPIRType::Image && type.image.sampled == 1;
bool separate_sampler = type.basetype == SPIRType::Sampler;
if (!sampler_buffer && (separate_image || separate_sampler))
continue;
return;
}
if (var.storage != StorageClassFunction && !is_builtin_variable(var) && !var.remapped_variable &&
@ -1199,8 +1174,7 @@ void CompilerHLSL::emit_resources()
emit_uniform(var);
emitted = true;
}
}
}
});
if (emitted)
statement("");
@ -1209,12 +1183,8 @@ void CompilerHLSL::emit_resources()
// Emit builtin input and output variables here.
emit_builtin_variables();
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
auto &type = this->get<SPIRType>(var.basetype);
bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock);
// Do not emit I/O blocks here.
@ -1228,8 +1198,7 @@ void CompilerHLSL::emit_resources()
emit_interface_block_globally(var);
emitted = true;
}
}
}
});
if (emitted)
statement("");
@ -1241,16 +1210,12 @@ void CompilerHLSL::emit_resources()
unordered_set<uint32_t> active_outputs;
vector<SPIRVariable *> input_variables;
vector<SPIRVariable *> output_variables;
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
auto &type = this->get<SPIRType>(var.basetype);
bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock);
if (var.storage != StorageClassInput && var.storage != StorageClassOutput)
continue;
return;
// Do not emit I/O blocks here.
// I/O blocks can be arrayed, so we must deal with them separately to support geometry shaders
@ -1280,8 +1245,7 @@ void CompilerHLSL::emit_resources()
// Emit the block struct and a global variable here.
emit_io_block(var);
}
}
}
});
const auto variable_compare = [&](const SPIRVariable *a, const SPIRVariable *b) -> bool {
// Sort input and output variables based on, from more robust to less robust:
@ -2169,16 +2133,12 @@ void CompilerHLSL::emit_hlsl_entry_point()
arguments.push_back("SPIRV_Cross_Input stage_input");
// Add I/O blocks as separate arguments with appropriate storage qualifier.
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
auto &type = this->get<SPIRType>(var.basetype);
bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock);
if (var.storage != StorageClassInput && var.storage != StorageClassOutput)
continue;
return;
if (block && !is_builtin_variable(var) && interface_variable_exists_in_entry_point(var.self))
{
@ -2191,8 +2151,7 @@ void CompilerHLSL::emit_hlsl_entry_point()
arguments.push_back(join("out ", variable_decl(type, join("stage_output", to_name(var.self)))));
}
}
}
}
});
auto &execution = get_entry_point();
@ -2336,16 +2295,12 @@ void CompilerHLSL::emit_hlsl_entry_point()
});
// Copy from stage input struct to globals.
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
auto &type = this->get<SPIRType>(var.basetype);
bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock);
if (var.storage != StorageClassInput)
continue;
return;
bool need_matrix_unroll = var.storage == StorageClassInput && execution.model == ExecutionModelVertex;
@ -2353,7 +2308,7 @@ void CompilerHLSL::emit_hlsl_entry_point()
interface_variable_exists_in_entry_point(var.self))
{
auto name = to_name(var.self);
auto &mtype = get<SPIRType>(var.basetype);
auto &mtype = this->get<SPIRType>(var.basetype);
if (need_matrix_unroll && mtype.columns > 1)
{
// Unroll matrices.
@ -2372,8 +2327,7 @@ void CompilerHLSL::emit_hlsl_entry_point()
auto name = to_name(var.self);
statement(name, " = stage_input", name, ";");
}
}
}
});
// Run the shader.
if (execution.model == ExecutionModelVertex)
@ -2386,16 +2340,12 @@ void CompilerHLSL::emit_hlsl_entry_point()
SPIRV_CROSS_THROW("Unsupported shader stage.");
// Copy block outputs.
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
auto &type = this->get<SPIRType>(var.basetype);
bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock);
if (var.storage != StorageClassOutput)
continue;
return;
// I/O blocks don't use the common stage input/output struct, but separate outputs.
if (block && !is_builtin_variable(var) && interface_variable_exists_in_entry_point(var.self))
@ -2403,8 +2353,7 @@ void CompilerHLSL::emit_hlsl_entry_point()
auto name = to_name(var.self);
statement("stage_output", name, " = ", name, ";");
}
}
}
});
// Copy stage outputs.
if (require_output)
@ -2440,16 +2389,12 @@ void CompilerHLSL::emit_hlsl_entry_point()
}
});
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
auto &type = this->get<SPIRType>(var.basetype);
bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock);
if (var.storage != StorageClassOutput)
continue;
return;
if (!block && var.storage != StorageClassFunction && !var.remapped_variable && type.pointer &&
!is_builtin_variable(var) && interface_variable_exists_in_entry_point(var.self))
@ -2469,8 +2414,7 @@ void CompilerHLSL::emit_hlsl_entry_point()
statement("stage_output.", name, " = ", name, ";");
}
}
}
}
});
statement("return stage_output;");
}


@ -148,7 +148,7 @@ private:
void emit_uniform(const SPIRVariable &var) override;
void emit_modern_uniform(const SPIRVariable &var);
void emit_legacy_uniform(const SPIRVariable &var);
void emit_specialization_constants();
void emit_specialization_constants_and_structs();
void emit_composite_constants();
void emit_fixup() override;
std::string builtin_to_glsl(spv::BuiltIn builtin, spv::StorageClass storage) override;


@ -90,19 +90,12 @@ void CompilerMSL::build_implicit_builtins()
bool has_frag_coord = false;
bool has_sample_id = false;
for (auto &id : ir.ids)
{
if (id.get_type() != TypeVariable)
continue;
auto &var = id.get<SPIRVariable>();
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
if (need_subpass_input && var.storage == StorageClassInput && ir.meta[var.self].decoration.builtin &&
ir.meta[var.self].decoration.builtin_type == BuiltInFragCoord)
{
builtin_frag_coord_id = var.self;
has_frag_coord = true;
break;
}
if (need_sample_pos && var.storage == StorageClassInput && ir.meta[var.self].decoration.builtin &&
@ -110,9 +103,8 @@ void CompilerMSL::build_implicit_builtins()
{
builtin_sample_id_id = var.self;
has_sample_id = true;
break;
}
}
});
if (!has_frag_coord && need_subpass_input)
{
@ -464,7 +456,7 @@ string CompilerMSL::compile()
buffer = unique_ptr<ostringstream>(new ostringstream());
emit_header();
emit_specialization_constants();
emit_specialization_constants_and_structs();
emit_resources();
emit_custom_functions();
emit_function(get<SPIRFunction>(ir.default_entry_point), Bitset());
@ -547,19 +539,14 @@ void CompilerMSL::extract_global_variables_from_functions()
{
// Uniforms
unordered_set<uint32_t> global_var_ids;
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
if (var.storage == StorageClassInput || var.storage == StorageClassOutput ||
var.storage == StorageClassUniform || var.storage == StorageClassUniformConstant ||
var.storage == StorageClassPushConstant || var.storage == StorageClassStorageBuffer)
{
global_var_ids.insert(var.self);
}
}
}
});
// Local vars that are declared in the main function and accessed directly by a function
auto &entry_func = get<SPIRFunction>(ir.default_entry_point);
@ -739,22 +726,17 @@ void CompilerMSL::extract_global_variables_from_function(uint32_t func_id, std::
// that are recursively contained within the type referenced by that variable should be packed tightly.
void CompilerMSL::mark_packable_structs()
{
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
if (var.storage != StorageClassFunction && !is_hidden_variable(var))
{
auto &type = get<SPIRType>(var.basetype);
auto &type = this->get<SPIRType>(var.basetype);
if (type.pointer &&
(type.storage == StorageClassUniform || type.storage == StorageClassUniformConstant ||
type.storage == StorageClassPushConstant || type.storage == StorageClassStorageBuffer) &&
(has_decoration(type.self, DecorationBlock) || has_decoration(type.self, DecorationBufferBlock)))
mark_as_packable(type);
}
}
}
});
}
// If the specified type is a struct, it and any nested structs
@ -1273,19 +1255,15 @@ uint32_t CompilerMSL::add_interface_block(StorageClass storage)
// Accumulate the variables that should appear in the interface struct
vector<SPIRVariable *> vars;
bool incl_builtins = (storage == StorageClassOutput);
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
auto &type = this->get<SPIRType>(var.basetype);
if (var.storage == storage && interface_variable_exists_in_entry_point(var.self) &&
!is_hidden_variable(var, incl_builtins) && type.pointer)
{
vars.push_back(&var);
}
}
}
});
// If no variables qualify, leave
if (vars.empty())
@ -2117,16 +2095,11 @@ void CompilerMSL::emit_custom_functions()
void CompilerMSL::declare_undefined_values()
{
bool emitted = false;
for (auto &id : ir.ids)
{
if (id.get_type() == TypeUndef)
{
auto &undef = id.get<SPIRUndef>();
auto &type = get<SPIRType>(undef.basetype);
ir.for_each_typed_id<SPIRUndef>([&](uint32_t, SPIRUndef &undef) {
auto &type = this->get<SPIRType>(undef.basetype);
statement("constant ", variable_decl(type, to_name(undef.self), undef.self), " = {};");
emitted = true;
}
}
});
if (emitted)
statement("");
@ -2138,23 +2111,18 @@ void CompilerMSL::declare_constant_arrays()
// global constants directly, so we are able to use constants as variable expressions.
bool emitted = false;
for (auto &id : ir.ids)
{
if (id.get_type() == TypeConstant)
{
auto &c = id.get<SPIRConstant>();
ir.for_each_typed_id<SPIRConstant>([&](uint32_t, SPIRConstant &c) {
if (c.specialization)
continue;
return;
auto &type = get<SPIRType>(c.constant_type);
auto &type = this->get<SPIRType>(c.constant_type);
if (!type.array.empty())
{
auto name = to_name(c.self);
statement("constant ", variable_decl(type, name), " = ", constant_expression(c), ";");
emitted = true;
}
}
}
});
if (emitted)
statement("");
@ -2162,42 +2130,6 @@ void CompilerMSL::declare_constant_arrays()
void CompilerMSL::emit_resources()
{
// Output non-builtin interface structs. These include local function structs
// and structs nested within uniform and read-write buffers.
unordered_set<uint32_t> declared_structs;
for (auto &id : ir.ids)
{
if (id.get_type() == TypeType)
{
auto &type = id.get<SPIRType>();
uint32_t type_id = type.self;
bool is_struct = (type.basetype == SPIRType::Struct) && type.array.empty();
bool is_block =
has_decoration(type.self, DecorationBlock) || has_decoration(type.self, DecorationBufferBlock);
bool is_builtin_block = is_block && is_builtin_type(type);
bool is_declarable_struct = is_struct && !is_builtin_block;
// We'll declare this later.
if (stage_out_var_id && get<SPIRVariable>(stage_out_var_id).basetype == type_id)
is_declarable_struct = false;
if (stage_in_var_id && get<SPIRVariable>(stage_in_var_id).basetype == type_id)
is_declarable_struct = false;
// Align and emit declarable structs...but avoid declaring each more than once.
if (is_declarable_struct && declared_structs.count(type_id) == 0)
{
declared_structs.insert(type_id);
if (has_decoration(type_id, DecorationCPacked))
align_struct(type);
emit_struct(type);
}
}
}
declare_constant_arrays();
declare_undefined_values();
@ -2207,14 +2139,18 @@ void CompilerMSL::emit_resources()
}
// Emit declarations for the specialization Metal function constants
void CompilerMSL::emit_specialization_constants()
void CompilerMSL::emit_specialization_constants_and_structs()
{
SpecializationConstant wg_x, wg_y, wg_z;
uint32_t workgroup_size_id = get_work_group_size_specialization_constants(wg_x, wg_y, wg_z);
bool emitted = false;
for (auto &id : ir.ids)
unordered_set<uint32_t> declared_structs;
for (auto &id_ : ir.ids_for_constant_or_type)
{
auto &id = ir.ids[id_];
if (id.get_type() == TypeConstant)
{
auto &c = id.get<SPIRConstant>();
@ -2278,6 +2214,42 @@ void CompilerMSL::emit_specialization_constants()
statement("constant ", variable_decl(type, name), " = ", constant_op_expression(c), ";");
emitted = true;
}
else if (id.get_type() == TypeType)
{
// Output non-builtin interface structs. These include local function structs
// and structs nested within uniform and read-write buffers.
auto &type = id.get<SPIRType>();
uint32_t type_id = type.self;
bool is_struct = (type.basetype == SPIRType::Struct) && type.array.empty();
bool is_block =
has_decoration(type.self, DecorationBlock) || has_decoration(type.self, DecorationBufferBlock);
bool is_builtin_block = is_block && is_builtin_type(type);
bool is_declarable_struct = is_struct && !is_builtin_block;
// We'll declare this later.
if (stage_out_var_id && get<SPIRVariable>(stage_out_var_id).basetype == type_id)
is_declarable_struct = false;
if (stage_in_var_id && get<SPIRVariable>(stage_in_var_id).basetype == type_id)
is_declarable_struct = false;
// Align and emit declarable structs...but avoid declaring each more than once.
if (is_declarable_struct && declared_structs.count(type_id) == 0)
{
if (emitted)
statement("");
emitted = false;
declared_structs.insert(type_id);
if (has_decoration(type_id, DecorationCPacked))
align_struct(type);
// Make sure we declare the underlying struct type, and not the "decorated" type with pointers, etc.
emit_struct(get<SPIRType>(type_id));
}
}
}
if (emitted)
@ -4099,7 +4071,7 @@ string CompilerMSL::member_attribute_qualifier(const SPIRType &type, uint32_t in
// index as the location.
uint32_t CompilerMSL::get_ordered_member_location(uint32_t type_id, uint32_t index, uint32_t *comp)
{
auto &m = ir.meta.at(type_id);
auto &m = ir.meta[type_id];
if (index < m.members.size())
{
auto &dec = m.members[index];
@ -4270,11 +4242,8 @@ string CompilerMSL::entry_point_args(bool append_comma)
vector<Resource> resources;
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
ir.for_each_typed_id<SPIRVariable>([&](uint32_t self, SPIRVariable &var) {
auto &id = ir.ids[self];
auto &type = get_variable_data_type(var);
uint32_t var_id = var.self;
@ -4301,8 +4270,7 @@ string CompilerMSL::entry_point_args(bool append_comma)
{ &id, to_name(var_id), type.basetype, get_metal_resource_index(var, type.basetype) });
}
}
}
}
});
std::sort(resources.begin(), resources.end(), [](const Resource &lhs, const Resource &rhs) {
return tie(lhs.basetype, lhs.index) < tie(rhs.basetype, rhs.index);
@ -4319,7 +4287,7 @@ string CompilerMSL::entry_point_args(bool append_comma)
{
case SPIRType::Struct:
{
auto &m = ir.meta.at(type.self);
auto &m = ir.meta[type.self];
if (m.members.size() == 0)
break;
if (!type.array.empty())
@ -4373,12 +4341,7 @@ string CompilerMSL::entry_point_args(bool append_comma)
}
// Builtin variables
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
uint32_t var_id = var.self;
BuiltIn bi_type = ir.meta[var_id].decoration.builtin_type;
@ -4388,7 +4351,7 @@ string CompilerMSL::entry_point_args(bool append_comma)
{
if (bi_type == BuiltInSamplePosition)
{
auto &entry_func = get<SPIRFunction>(ir.default_entry_point);
auto &entry_func = this->get<SPIRFunction>(ir.default_entry_point);
entry_func.fixup_hooks_in.push_back([=]() {
statement(builtin_type_decl(bi_type), " ", to_expression(var_id), " = get_sample_position(",
to_expression(builtin_sample_id_id), ");");
@ -4401,7 +4364,7 @@ string CompilerMSL::entry_point_args(bool append_comma)
else if (msl_options.is_macos() && !msl_options.supports_msl_version(2, 1))
SPIRV_CROSS_THROW("simd_is_helper_thread() requires version 2.1 on macOS.");
auto &entry_func = get<SPIRFunction>(ir.default_entry_point);
auto &entry_func = this->get<SPIRFunction>(ir.default_entry_point);
entry_func.fixup_hooks_in.push_back([=]() {
statement(builtin_type_decl(bi_type), " ", to_expression(var_id),
" = simd_is_helper_thread();");
@ -4416,8 +4379,7 @@ string CompilerMSL::entry_point_args(bool append_comma)
ep_args += " [[" + builtin_qualifier(bi_type) + "]]";
}
}
}
}
});
// Vertex and instance index built-ins
if (needs_vertex_idx_arg)
@ -4567,9 +4529,9 @@ string CompilerMSL::to_name(uint32_t id, bool allow_alias) const
{
if (current_function && (current_function->self == ir.default_entry_point))
{
string qual_name = ir.meta.at(id).decoration.qualified_alias;
if (!qual_name.empty())
return qual_name;
auto *m = ir.find_meta(id);
if (m && !m->decoration.qualified_alias.empty())
return m->decoration.qualified_alias;
}
return Compiler::to_name(id, allow_alias);
}
@ -4610,41 +4572,23 @@ void CompilerMSL::replace_illegal_names()
"saturate",
};
for (auto &id : ir.ids)
{
switch (id.get_type())
{
case TypeVariable:
{
auto &dec = ir.meta[id.get_id()].decoration;
ir.for_each_typed_id<SPIRVariable>([&](uint32_t self, SPIRVariable &) {
auto &dec = ir.meta[self].decoration;
if (keywords.find(dec.alias) != end(keywords))
dec.alias += "0";
});
break;
}
case TypeFunction:
{
auto &dec = ir.meta[id.get_id()].decoration;
ir.for_each_typed_id<SPIRFunction>([&](uint32_t self, SPIRFunction &) {
auto &dec = ir.meta[self].decoration;
if (illegal_func_names.find(dec.alias) != end(illegal_func_names))
dec.alias += "0";
});
break;
}
case TypeType:
{
for (auto &mbr_dec : ir.meta[id.get_id()].members)
ir.for_each_typed_id<SPIRType>([&](uint32_t self, SPIRType &) {
for (auto &mbr_dec : ir.meta[self].members)
if (keywords.find(mbr_dec.alias) != end(keywords))
mbr_dec.alias += "0";
break;
}
default:
break;
}
}
});
for (auto &entry : ir.entry_points)
{


@ -380,7 +380,7 @@ protected:
void emit_custom_functions();
void emit_resources();
void emit_specialization_constants();
void emit_specialization_constants_and_structs();
void emit_interface_block(uint32_t ib_var_id);
bool maybe_emit_array_assignment(uint32_t id_lhs, uint32_t id_rhs);
void add_convert_row_major_matrix_function(uint32_t cols, uint32_t rows);


@ -1063,8 +1063,11 @@ bool Parser::types_are_logically_equivalent(const SPIRType &a, const SPIRType &b
bool Parser::variable_storage_is_aliased(const SPIRVariable &v) const
{
auto &type = get<SPIRType>(v.basetype);
auto *type_meta = ir.find_meta(type.self);
bool ssbo = v.storage == StorageClassStorageBuffer ||
ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
(type_meta && type_meta->decoration.decoration_flags.get(DecorationBufferBlock));
bool image = type.basetype == SPIRType::Image;
bool counter = type.basetype == SPIRType::AtomicCounter;


@ -47,7 +47,8 @@ private:
template <typename T, typename... P>
T &set(uint32_t id, P &&... args)
{
auto &var = variant_set<T>(ir.ids.at(id), std::forward<P>(args)...);
ir.add_typed_id(static_cast<Types>(T::type), id);
auto &var = variant_set<T>(ir.ids[id], std::forward<P>(args)...);
var.self = id;
return var;
}
@ -55,13 +56,13 @@ private:
template <typename T>
T &get(uint32_t id)
{
return variant_get<T>(ir.ids.at(id));
return variant_get<T>(ir.ids[id]);
}
template <typename T>
T *maybe_get(uint32_t id)
{
if (ir.ids.at(id).get_type() == T::type)
if (ir.ids[id].get_type() == static_cast<Types>(T::type))
return &get<T>(id);
else
return nullptr;
@ -70,13 +71,13 @@ private:
template <typename T>
const T &get(uint32_t id) const
{
return variant_get<T>(ir.ids.at(id));
return variant_get<T>(ir.ids[id]);
}
template <typename T>
const T *maybe_get(uint32_t id) const
{
if (ir.ids.at(id).get_type() == T::type)
if (ir.ids[id].get_type() == T::type)
return &get<T>(id);
else
return nullptr;


@ -264,18 +264,11 @@ string CompilerReflection::compile()
void CompilerReflection::emit_types()
{
bool emitted_open_tag = false;
for (auto &id : ir.ids)
{
auto idType = id.get_type();
if (idType == TypeType)
{
auto &type = id.get<SPIRType>();
ir.for_each_typed_id<SPIRType>([&](uint32_t, SPIRType &type) {
if (type.basetype == SPIRType::Struct && !type.pointer && type.array.empty())
{
emit_type(type, emitted_open_tag);
}
}
}
});
if (emitted_open_tag)
{
@ -565,9 +558,16 @@ void CompilerReflection::emit_specialization_constants()
string CompilerReflection::to_member_name(const SPIRType &type, uint32_t index) const
{
auto &memb = ir.meta[type.self].members;
auto *type_meta = ir.find_meta(type.self);
if (type_meta)
{
auto &memb = type_meta->members;
if (index < memb.size() && !memb[index].alias.empty())
return memb[index].alias;
else
return join("_m", index);
}
else
return join("_m", index);
}