GLSL: Implement GL_EXT_buffer_reference.
Buffer objects can contain arbitrary pointers to blocks. We can also implement ConvertPtrToU and ConvertUToPtr. The latter can cast a uint64_t to any type as it pleases, so we will need to generate fake buffer reference blocks to be able to cast the type.
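For orientation, a minimal hand-written sketch of what the extension enables follows. It is only an illustration: the block and member names (Node, Registers, head_addr) are invented here, while the reference shaders further down are the commit's actual test output. A buffer_reference block is declared much like an SSBO block, a value of that type behaves as a 64-bit pointer that can be stored in other buffers or in push constants, and the casts between uint64_t and the reference type are what OpConvertUToPtr and OpConvertPtrToU map to:

#version 450
#extension GL_EXT_buffer_reference : require
#extension GL_ARB_gpu_shader_int64 : require
layout(local_size_x = 1) in;

// Forward declaration so the block can refer to itself (linked-list style).
layout(buffer_reference) buffer Node;
layout(buffer_reference, std430) buffer Node
{
    int value;
    Node next;
};

layout(push_constant) uniform Registers
{
    uint64_t head_addr; // raw device address supplied by the application (hypothetical name)
} registers;

void main()
{
    // uint64_t -> pointer: this is what OpConvertUToPtr becomes.
    Node head = Node(registers.head_addr);
    head.next.value = head.value + 1;

    // pointer -> uint64_t: this is what OpConvertPtrToU becomes.
    uint64_t next_addr = uint64_t(head.next);
}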
This commit is contained in:
parent f940b6c0c5
commit 2cc374a0c8
@@ -0,0 +1,25 @@
#version 450
#extension GL_EXT_buffer_reference : require
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;

layout(buffer_reference) buffer Block;
layout(buffer_reference, std430) buffer Block
{
    float v;
};

layout(set = 0, binding = 0, std140) uniform UBO
{
    Block blocks[4];
} ubo;

void main()
{
    Block blocks[4];
    blocks[0] = ubo.blocks[0];
    blocks[1] = ubo.blocks[1];
    blocks[2] = ubo.blocks[2];
    blocks[3] = ubo.blocks[3];
    blocks[gl_WorkGroupID.x].v = 20.0;
}
@@ -0,0 +1,45 @@
#version 450
#extension GL_ARB_gpu_shader_int64 : require
#extension GL_EXT_buffer_reference : require
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;

layout(buffer_reference) buffer Node;
layout(buffer_reference, std140) buffer Node
{
    layout(offset = 0) int value;
    layout(offset = 16) Node next;
    layout(offset = 32) Node prev;
};

layout(set = 0, binding = 0, std430) restrict buffer LinkedList
{
    Node head1;
    Node head2;
} _50;

void main()
{
    Node _45;
    if (gl_WorkGroupID.x < 4u)
    {
        _45 = _50.head1;
    }
    else
    {
        _45 = _50.head2;
    }
    restrict Node n = _45;
    Node param = n.next;
    Node param_1 = _50.head1;
    Node param_2 = _50.head2;
    param.value = param_1.value + param_2.value;
    Node param_4 = _50.head1;
    Node param_3 = param_4;
    n = param_3;
    int v = _50.head2.value;
    n.value = 20;
    n.value = v * 10;
    uint64_t uptr = uint64_t(_50.head2.next);
    Node unode = Node(uptr);
}
@@ -0,0 +1,21 @@
#version 450
#extension GL_ARB_gpu_shader_int64 : require
#extension GL_EXT_buffer_reference : require
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;

layout(buffer_reference) buffer uintPointer
{
    uint value;
};

layout(push_constant, std430) uniform _4_12
{
    uint64_t _m0;
} _12;

void main()
{
    uintPointer _3 = uintPointer(_12._m0);
    _3.value = 20u;
}
@@ -0,0 +1,21 @@
#version 450
#extension GL_ARB_gpu_shader_int64 : require
#extension GL_EXT_buffer_reference : require
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;

layout(buffer_reference) buffer uint0_Pointer
{
    uint value[];
};

layout(push_constant, std430) uniform _6_14
{
    uint64_t _m0;
} _14;

void main()
{
    uint0_Pointer _5 = uint0_Pointer(_14._m0);
    _5.value[10] = 20u;
}
@@ -0,0 +1,25 @@
#version 450
#extension GL_EXT_buffer_reference : require
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;

layout(buffer_reference) buffer Block;
layout(buffer_reference, std430) buffer Block
{
    float v;
};

layout(set = 0, binding = 0, std140) uniform UBO
{
    Block blocks[4];
} ubo;

void main()
{
    Block blocks[4];
    blocks[0] = ubo.blocks[0];
    blocks[1] = ubo.blocks[1];
    blocks[2] = ubo.blocks[2];
    blocks[3] = ubo.blocks[3];
    blocks[gl_WorkGroupID.x].v = 20.0;
}
@@ -0,0 +1,56 @@
#version 450
#extension GL_ARB_gpu_shader_int64 : require
#extension GL_EXT_buffer_reference : require
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;

layout(buffer_reference) buffer Node;
layout(buffer_reference, std140) buffer Node
{
    layout(offset = 0) int value;
    layout(offset = 16) Node next;
    layout(offset = 32) Node prev;
};

layout(set = 0, binding = 0, std430) restrict buffer LinkedList
{
    Node head1;
    Node head2;
} _50;

void copy_node(restrict Node dst, restrict Node a, restrict Node b)
{
    dst.value = a.value + b.value;
}

void overwrite_node(out Node dst, Node src)
{
    dst = src;
}

void main()
{
    Node _45;
    if (gl_WorkGroupID.x < 4u)
    {
        _45 = _50.head1;
    }
    else
    {
        _45 = _50.head2;
    }
    restrict Node n = _45;
    Node param = n.next;
    Node param_1 = _50.head1;
    Node param_2 = _50.head2;
    copy_node(param, param_1, param_2);
    Node param_4 = _50.head1;
    Node param_3;
    overwrite_node(param_3, param_4);
    n = param_3;
    int v = _50.head2.value;
    n.value = 20;
    n.value = v * 10;
    uint64_t uptr = uint64_t(_50.head2.next);
    Node unode = Node(uptr);
}
@@ -0,0 +1,44 @@
; SPIR-V
; Version: 1.0
; Generator: Khronos Glslang Reference Front End; 7
; Bound: 27
; Schema: 0
OpCapability Shader
OpCapability Int64
OpCapability PhysicalStorageBufferAddressesEXT
OpExtension "SPV_EXT_physical_storage_buffer"
%1 = OpExtInstImport "GLSL.std.450"
OpMemoryModel PhysicalStorageBuffer64EXT GLSL450
OpEntryPoint GLCompute %main "main"
OpExecutionMode %main LocalSize 1 1 1
OpSource GLSL 450
OpSourceExtension "GL_ARB_gpu_shader_int64"
OpSourceExtension "GL_EXT_buffer_reference"
OpDecorate %ptr AliasedPointerEXT
OpMemberDecorate %Registers 0 Offset 0
OpDecorate %Registers Block
%void = OpTypeVoid
%3 = OpTypeFunction %void
%uint = OpTypeInt 32 0
%_ptr_PhysicalStorageBufferEXT_uint = OpTypePointer PhysicalStorageBufferEXT %uint
%_ptr_Function__ptr_PhysicalStorageBufferEXT_uint = OpTypePointer Function %_ptr_PhysicalStorageBufferEXT_uint
%ulong = OpTypeInt 64 0
%Registers = OpTypeStruct %ulong
%_ptr_PushConstant_Registers = OpTypePointer PushConstant %Registers
%registers = OpVariable %_ptr_PushConstant_Registers PushConstant
%int = OpTypeInt 32 1
%int_0 = OpConstant %int 0
%_ptr_PushConstant_ulong = OpTypePointer PushConstant %ulong
%int_10 = OpConstant %int 10
%uint_20 = OpConstant %uint 20
%main = OpFunction %void None %3
%5 = OpLabel
%ptr = OpVariable %_ptr_Function__ptr_PhysicalStorageBufferEXT_uint Function
%19 = OpAccessChain %_ptr_PushConstant_ulong %registers %int_0
%20 = OpLoad %ulong %19
%21 = OpConvertUToPtr %_ptr_PhysicalStorageBufferEXT_uint %20
OpStore %ptr %21
%22 = OpLoad %_ptr_PhysicalStorageBufferEXT_uint %ptr
OpStore %22 %uint_20 Aligned 4
OpReturn
OpFunctionEnd
@@ -0,0 +1,51 @@
; SPIR-V
; Version: 1.0
; Generator: Khronos Glslang Reference Front End; 7
; Bound: 27
; Schema: 0
OpCapability Shader
OpCapability Int64
OpCapability PhysicalStorageBufferAddressesEXT
OpExtension "SPV_EXT_physical_storage_buffer"
%1 = OpExtInstImport "GLSL.std.450"
OpMemoryModel PhysicalStorageBuffer64EXT GLSL450
OpEntryPoint GLCompute %main "main"
OpExecutionMode %main LocalSize 1 1 1
OpSource GLSL 450
OpSourceExtension "GL_ARB_gpu_shader_int64"
OpSourceExtension "GL_EXT_buffer_reference"
OpDecorate %_runtimearr_uint ArrayStride 4
OpMemberDecorate %uintPtr 0 Offset 0
OpDecorate %uintPtr Block
OpDecorate %ptr AliasedPointerEXT
OpMemberDecorate %Registers 0 Offset 0
OpDecorate %Registers Block
%void = OpTypeVoid
%3 = OpTypeFunction %void
%uint = OpTypeInt 32 0
%_runtimearr_uint = OpTypeRuntimeArray %uint
%uintPtr = OpTypeStruct %_runtimearr_uint
%_ptr_PhysicalStorageBufferEXT_uint_array = OpTypePointer PhysicalStorageBufferEXT %_runtimearr_uint
%_ptr_Function__ptr_PhysicalStorageBufferEXT_uint_array = OpTypePointer Function %_ptr_PhysicalStorageBufferEXT_uint_array
%ulong = OpTypeInt 64 0
%Registers = OpTypeStruct %ulong
%_ptr_PushConstant_Registers = OpTypePointer PushConstant %Registers
%registers = OpVariable %_ptr_PushConstant_Registers PushConstant
%int = OpTypeInt 32 1
%int_0 = OpConstant %int 0
%_ptr_PushConstant_ulong = OpTypePointer PushConstant %ulong
%int_10 = OpConstant %int 10
%uint_20 = OpConstant %uint 20
%_ptr_PhysicalStorageBufferEXT_uint = OpTypePointer PhysicalStorageBufferEXT %uint
%main = OpFunction %void None %3
%5 = OpLabel
%ptr = OpVariable %_ptr_Function__ptr_PhysicalStorageBufferEXT_uint_array Function
%19 = OpAccessChain %_ptr_PushConstant_ulong %registers %int_0
%20 = OpLoad %ulong %19
%21 = OpConvertUToPtr %_ptr_PhysicalStorageBufferEXT_uint_array %20
OpStore %ptr %21
%22 = OpLoad %_ptr_PhysicalStorageBufferEXT_uint_array %ptr
%26 = OpAccessChain %_ptr_PhysicalStorageBufferEXT_uint %22 %int_10
OpStore %26 %uint_20 Aligned 4
OpReturn
OpFunctionEnd
@@ -0,0 +1,23 @@
#version 450
#extension GL_EXT_buffer_reference : require
layout(local_size_x = 1) in;

layout(buffer_reference) buffer Block
{
    float v;
};

layout(std140, set = 0, binding = 0) uniform UBO
{
    Block blocks[4];
} ubo;

void main()
{
    Block blocks[4];
    blocks[0] = ubo.blocks[0];
    blocks[1] = ubo.blocks[1];
    blocks[2] = ubo.blocks[2];
    blocks[3] = ubo.blocks[3];
    blocks[gl_WorkGroupID.x].v = 20.0;
}
shaders/vulkan/comp/buffer-reference.nocompat.vk.comp (new file, 40 lines)
@@ -0,0 +1,40 @@
#version 450
#extension GL_EXT_buffer_reference : require
#extension GL_ARB_gpu_shader_int64 : require

layout(buffer_reference) buffer Node;
layout(buffer_reference) buffer Node
{
    int value;
    layout(offset = 16) Node next;
    layout(offset = 32) Node prev;
};

layout(std430, set = 0, binding = 0) buffer LinkedList
{
    restrict Node head1;
    restrict Node head2;
};

void copy_node(restrict Node dst, restrict Node a, restrict Node b)
{
    dst.value = a.value + b.value;
}

void overwrite_node(out Node dst, Node src)
{
    dst = src;
}

void main()
{
    restrict Node n = gl_WorkGroupID.x < 4u ? head1 : head2;
    copy_node(n.next, head1, head2);
    overwrite_node(n, head1);
    int v = head2.value;
    n.value = 20;
    n.value = v * 10;

    uint64_t uptr = uint64_t(head2.next);
    Node unode = Node(uptr);
}
@@ -74,6 +74,7 @@ bool Compiler::variable_storage_is_aliased(const SPIRVariable &v)
                ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
     bool image = type.basetype == SPIRType::Image;
     bool counter = type.basetype == SPIRType::AtomicCounter;
+    bool buffer_reference = type.storage == StorageClassPhysicalStorageBufferEXT;
 
     bool is_restrict;
     if (ssbo)
@@ -81,7 +82,7 @@ bool Compiler::variable_storage_is_aliased(const SPIRVariable &v)
     else
         is_restrict = has_decoration(v.self, DecorationRestrict);
 
-    return !is_restrict && (ssbo || image || counter);
+    return !is_restrict && (ssbo || image || counter || buffer_reference);
 }
 
 bool Compiler::block_is_pure(const SPIRBlock &block)
@@ -300,18 +301,41 @@ void Compiler::register_write(uint32_t chain)
 
     if (var)
     {
+        bool check_argument_storage_qualifier = true;
+        auto &type = expression_type(chain);
+
         // If our variable is in a storage class which can alias with other buffers,
         // invalidate all variables which depend on aliased variables. And if this is a
         // variable pointer, then invalidate all variables regardless.
         if (get_variable_data_type(*var).pointer)
+        {
             flush_all_active_variables();
-        if (variable_storage_is_aliased(*var))
+
+            if (type.pointer_depth == 1)
+            {
+                // We have a backing variable which is a pointer-to-pointer type.
+                // We are storing some data through a pointer acquired through that variable,
+                // but we are not writing to the value of the variable itself,
+                // i.e., we are not modifying the pointer directly.
+                // If we are storing a non-pointer type (pointer_depth == 1),
+                // we know that we are storing some unrelated data.
+                // A case here would be
+                // void foo(Foo * const *arg) {
+                //     Foo *bar = *arg;
+                //     bar->unrelated = 42;
+                // }
+                // arg, the argument is constant.
+                check_argument_storage_qualifier = false;
+            }
+        }
+
+        if (type.storage == StorageClassPhysicalStorageBufferEXT || variable_storage_is_aliased(*var))
             flush_all_aliased_variables();
         else if (var)
             flush_dependees(*var);
 
         // We tried to write to a parameter which is not marked with out qualifier, force a recompile.
-        if (var->parameter && var->parameter->write_count == 0)
+        if (check_argument_storage_qualifier && var->parameter && var->parameter->write_count == 0)
         {
             var->parameter->write_count++;
             force_recompile();
@@ -4114,8 +4138,13 @@ Bitset Compiler::combined_decoration_for_member(const SPIRType &type, uint32_t i
 
         // If our type is a struct, traverse all the members as well recursively.
         flags.merge_or(dec.decoration_flags);
+
         for (uint32_t i = 0; i < type.member_types.size(); i++)
-            flags.merge_or(combined_decoration_for_member(get<SPIRType>(type.member_types[i]), i));
+        {
+            auto &memb_type = get<SPIRType>(type.member_types[i]);
+            if (!memb_type.pointer)
+                flags.merge_or(combined_decoration_for_member(memb_type, i));
+        }
     }
 
     return flags;
@@ -4180,3 +4209,44 @@ void Compiler::clear_force_recompile()
 {
     is_force_recompile = false;
 }
+
+Compiler::PhysicalStorageBufferPointerHandler::PhysicalStorageBufferPointerHandler(Compiler &compiler_)
+    : compiler(compiler_)
+{
+}
+
+bool Compiler::PhysicalStorageBufferPointerHandler::handle(Op op, const uint32_t *args, uint32_t)
+{
+    if (op == OpConvertUToPtr)
+    {
+        auto &type = compiler.get<SPIRType>(args[0]);
+        if (type.storage == StorageClassPhysicalStorageBufferEXT && type.pointer && type.pointer_depth == 1)
+        {
+            // If we need to cast to a pointer type which is not a block, we might need to synthesize ourselves
+            // a block type which wraps this POD type.
+            if (type.basetype != SPIRType::Struct)
+                types.insert(args[0]);
+        }
+    }
+
+    return true;
+}
+
+void Compiler::analyze_non_block_pointer_types()
+{
+    PhysicalStorageBufferPointerHandler handler(*this);
+    traverse_all_reachable_opcodes(get<SPIRFunction>(ir.default_entry_point), handler);
+    physical_storage_non_block_pointer_types.reserve(handler.types.size());
+    for (auto type : handler.types)
+        physical_storage_non_block_pointer_types.push_back(type);
+    sort(begin(physical_storage_non_block_pointer_types), end(physical_storage_non_block_pointer_types));
+}
+
+bool Compiler::type_is_array_of_pointers(const SPIRType &type) const
+{
+    if (!type.pointer)
+        return false;
+
+    // If parent type has same pointer depth, we must have an array of pointers.
+    return type.pointer_depth == get<SPIRType>(type.parent_type).pointer_depth;
+}
@@ -932,6 +932,16 @@ protected:
         uint32_t write_count = 0;
     };
 
+    struct PhysicalStorageBufferPointerHandler : OpcodeHandler
+    {
+        PhysicalStorageBufferPointerHandler(Compiler &compiler_);
+        bool handle(spv::Op op, const uint32_t *args, uint32_t length) override;
+        Compiler &compiler;
+        std::unordered_set<uint32_t> types;
+    };
+    void analyze_non_block_pointer_types();
+    SmallVector<uint32_t> physical_storage_non_block_pointer_types;
+
     void analyze_variable_scope(SPIRFunction &function, AnalyzeVariableScopeAccessHandler &handler);
     void find_function_local_luts(SPIRFunction &function, const AnalyzeVariableScopeAccessHandler &handler,
                                   bool single_function);
@@ -959,6 +969,8 @@ protected:
     bool has_extended_member_decoration(uint32_t type, uint32_t index, ExtendedDecorations decoration) const;
     void unset_extended_member_decoration(uint32_t type, uint32_t index, ExtendedDecorations decoration);
 
+    bool type_is_array_of_pointers(const SPIRType &type) const;
+
 private:
     // Used only to implement the old deprecated get_entry_point() interface.
     const SPIREntryPoint &get_first_entry_point(const std::string &name) const;
@@ -66,6 +66,8 @@ ParsedIR &ParsedIR::operator=(ParsedIR &&other) SPIRV_CROSS_NOEXCEPT
     continue_block_to_loop_header = move(other.continue_block_to_loop_header);
     entry_points = move(other.entry_points);
     ids = move(other.ids);
+    addressing_model = other.addressing_model;
+    memory_model = other.memory_model;
 
     default_entry_point = other.default_entry_point;
     source = other.source;
@@ -98,6 +100,8 @@ ParsedIR &ParsedIR::operator=(const ParsedIR &other)
     default_entry_point = other.default_entry_point;
     source = other.source;
     loop_iteration_depth = other.loop_iteration_depth;
+    addressing_model = other.addressing_model;
+    memory_model = other.memory_model;
 
     // Very deliberate copying of IDs. There is no default copy constructor, nor a simple default constructor.
     // Construct object first so we have the correct allocator set-up, then we can copy object into our new pool group.
@@ -692,24 +696,27 @@ void ParsedIR::add_typed_id(Types type, uint32_t id)
     if (loop_iteration_depth)
         SPIRV_CROSS_THROW("Cannot add typed ID while looping over it.");
 
-    switch (type)
+    if (ids[id].empty() || ids[id].get_type() != type)
     {
-    case TypeConstant:
-        ids_for_constant_or_variable.push_back(id);
-        ids_for_constant_or_type.push_back(id);
-        break;
+        switch (type)
+        {
+        case TypeConstant:
+            ids_for_constant_or_variable.push_back(id);
+            ids_for_constant_or_type.push_back(id);
+            break;
 
-    case TypeVariable:
-        ids_for_constant_or_variable.push_back(id);
-        break;
+        case TypeVariable:
+            ids_for_constant_or_variable.push_back(id);
+            break;
 
-    case TypeType:
-    case TypeConstantOp:
-        ids_for_constant_or_type.push_back(id);
-        break;
+        case TypeType:
+        case TypeConstantOp:
+            ids_for_constant_or_type.push_back(id);
+            break;
 
-    default:
-        break;
+        default:
+            break;
+        }
     }
 
     if (ids[id].empty())
@@ -107,6 +107,9 @@ public:
 
     Source source;
 
+    spv::AddressingModel addressing_model = spv::AddressingModelMax;
+    spv::MemoryModel memory_model = spv::MemoryModelMax;
+
     // Decoration handling methods.
     // Can be useful for simple "raw" reflection.
     // However, most members are here because the Parser needs most of these,
spirv_glsl.cpp (333 changed lines)
@@ -430,6 +430,21 @@ void CompilerGLSL::find_static_extensions()
 
     if (options.separate_shader_objects && !options.es && options.version < 410)
         require_extension_internal("GL_ARB_separate_shader_objects");
+
+    if (ir.addressing_model == AddressingModelPhysicalStorageBuffer64EXT)
+    {
+        if (!options.vulkan_semantics)
+            SPIRV_CROSS_THROW("GL_EXT_buffer_reference is only supported in Vulkan GLSL.");
+        if (options.es && options.version < 320)
+            SPIRV_CROSS_THROW("GL_EXT_buffer_reference requires ESSL 320.");
+        else if (!options.es && options.version < 450)
+            SPIRV_CROSS_THROW("GL_EXT_buffer_reference requires GLSL 450.");
+        require_extension_internal("GL_EXT_buffer_reference");
+    }
+    else if (ir.addressing_model != AddressingModelLogical)
+    {
+        SPIRV_CROSS_THROW("Only Logical and PhysicalStorageBuffer64EXT addressing models are supported.");
+    }
 }
 
 string CompilerGLSL::compile()
@@ -446,6 +461,11 @@ string CompilerGLSL::compile()
     update_active_builtins();
     analyze_image_and_sampler_usage();
 
+    // Shaders might cast unrelated data to pointers of non-block types.
+    // Find all such instances and make sure we can cast the pointers to a synthesized block type.
+    if (ir.addressing_model == AddressingModelPhysicalStorageBuffer64EXT)
+        analyze_non_block_pointer_types();
+
     uint32_t pass_count = 0;
     do
     {
@@ -972,6 +992,24 @@ uint32_t CompilerGLSL::type_to_packed_base_size(const SPIRType &type, BufferPack
 uint32_t CompilerGLSL::type_to_packed_alignment(const SPIRType &type, const Bitset &flags,
                                                 BufferPackingStandard packing)
 {
+    // If using PhysicalStorageBufferEXT storage class, this is a pointer,
+    // and is 64-bit.
+    if (type.storage == StorageClassPhysicalStorageBufferEXT)
+    {
+        if (!type.pointer)
+            SPIRV_CROSS_THROW("Types in PhysicalStorageBufferEXT must be pointers.");
+
+        if (ir.addressing_model == AddressingModelPhysicalStorageBuffer64EXT)
+        {
+            if (packing_is_vec4_padded(packing) && type_is_array_of_pointers(type))
+                return 16;
+            else
+                return 8;
+        }
+        else
+            SPIRV_CROSS_THROW("AddressingModelPhysicalStorageBuffer64EXT must be used for PhysicalStorageBufferEXT.");
+    }
+
     if (!type.array.empty())
     {
         uint32_t minimum_alignment = 1;
@@ -1088,6 +1126,19 @@ uint32_t CompilerGLSL::type_to_packed_size(const SPIRType &type, const Bitset &f
         return to_array_size_literal(type) * type_to_packed_array_stride(type, flags, packing);
     }
 
+    // If using PhysicalStorageBufferEXT storage class, this is a pointer,
+    // and is 64-bit.
+    if (type.storage == StorageClassPhysicalStorageBufferEXT)
+    {
+        if (!type.pointer)
+            SPIRV_CROSS_THROW("Types in PhysicalStorageBufferEXT must be pointers.");
+
+        if (ir.addressing_model == AddressingModelPhysicalStorageBuffer64EXT)
+            return 8;
+        else
+            SPIRV_CROSS_THROW("AddressingModelPhysicalStorageBuffer64EXT must be used for PhysicalStorageBufferEXT.");
+    }
+
     uint32_t size = 0;
 
     if (type.basetype == SPIRType::Struct)
@@ -1211,7 +1262,7 @@ bool CompilerGLSL::buffer_is_packing_standard(const SPIRType &type, BufferPackin
 
             // The next member following a struct member is aligned to the base alignment of the struct that came before.
             // GL 4.5 spec, 7.6.2.2.
-            if (memb_type.basetype == SPIRType::Struct)
+            if (memb_type.basetype == SPIRType::Struct && !memb_type.pointer)
                 pad_alignment = packed_alignment;
             else
                 pad_alignment = 1;
@@ -1237,8 +1288,11 @@ bool CompilerGLSL::buffer_is_packing_standard(const SPIRType &type, BufferPackin
             // We cannot use enhanced layouts on substructs, so they better be up to spec.
             auto substruct_packing = packing_to_substruct_packing(packing);
 
-            if (!memb_type.member_types.empty() && !buffer_is_packing_standard(memb_type, substruct_packing))
+            if (!memb_type.pointer && !memb_type.member_types.empty() &&
+                !buffer_is_packing_standard(memb_type, substruct_packing))
+            {
                 return false;
+            }
         }
 
         // Bump size.
@@ -1412,41 +1466,7 @@ string CompilerGLSL::layout_for_variable(const SPIRVariable &var)
     }
     else if (can_use_buffer_blocks && (push_constant_block || ssbo_block))
     {
-        if (buffer_is_packing_standard(type, BufferPackingStd430))
-            attr.push_back("std430");
-        else if (buffer_is_packing_standard(type, BufferPackingStd140))
-            attr.push_back("std140");
-        else if (buffer_is_packing_standard(type, BufferPackingStd140EnhancedLayout))
-        {
-            attr.push_back("std140");
-
-            // Fallback time. We might be able to use the ARB_enhanced_layouts to deal with this difference,
-            // however, we can only use layout(offset) on the block itself, not any substructs, so the substructs better be the appropriate layout.
-            // Enhanced layouts seem to always work in Vulkan GLSL, so no need for extensions there.
-            if (options.es && !options.vulkan_semantics)
-                SPIRV_CROSS_THROW("Push constant block cannot be expressed as neither std430 nor std140. ES-targets do "
-                                  "not support GL_ARB_enhanced_layouts.");
-            if (!options.es && !options.vulkan_semantics && options.version < 440)
-                require_extension_internal("GL_ARB_enhanced_layouts");
-
-            set_extended_decoration(type.self, SPIRVCrossDecorationPacked);
-        }
-        else if (buffer_is_packing_standard(type, BufferPackingStd430EnhancedLayout))
-        {
-            attr.push_back("std430");
-            if (options.es && !options.vulkan_semantics)
-                SPIRV_CROSS_THROW("Push constant block cannot be expressed as neither std430 nor std140. ES-targets do "
-                                  "not support GL_ARB_enhanced_layouts.");
-            if (!options.es && !options.vulkan_semantics && options.version < 440)
-                require_extension_internal("GL_ARB_enhanced_layouts");
-
-            set_extended_decoration(type.self, SPIRVCrossDecorationPacked);
-        }
-        else
-        {
-            SPIRV_CROSS_THROW("Buffer block cannot be expressed as neither std430 nor std140, even with enhanced "
-                              "layouts. You can try flattening this block to support a more flexible layout.");
-        }
+        attr.push_back(buffer_to_packing_standard(type));
     }
 
     // For images, the type itself adds a layout qualifer.
@@ -1467,6 +1487,44 @@ string CompilerGLSL::layout_for_variable(const SPIRVariable &var)
     return res;
 }
 
+string CompilerGLSL::buffer_to_packing_standard(const SPIRType &type)
+{
+    if (buffer_is_packing_standard(type, BufferPackingStd430))
+        return "std430";
+    else if (buffer_is_packing_standard(type, BufferPackingStd140))
+        return "std140";
+    else if (buffer_is_packing_standard(type, BufferPackingStd140EnhancedLayout))
+    {
+        // Fallback time. We might be able to use the ARB_enhanced_layouts to deal with this difference,
+        // however, we can only use layout(offset) on the block itself, not any substructs, so the substructs better be the appropriate layout.
+        // Enhanced layouts seem to always work in Vulkan GLSL, so no need for extensions there.
+        if (options.es && !options.vulkan_semantics)
+            SPIRV_CROSS_THROW("Push constant block cannot be expressed as neither std430 nor std140. ES-targets do "
+                              "not support GL_ARB_enhanced_layouts.");
+        if (!options.es && !options.vulkan_semantics && options.version < 440)
+            require_extension_internal("GL_ARB_enhanced_layouts");
+
+        set_extended_decoration(type.self, SPIRVCrossDecorationPacked);
+        return "std140";
+    }
+    else if (buffer_is_packing_standard(type, BufferPackingStd430EnhancedLayout))
+    {
+        if (options.es && !options.vulkan_semantics)
+            SPIRV_CROSS_THROW("Push constant block cannot be expressed as neither std430 nor std140. ES-targets do "
+                              "not support GL_ARB_enhanced_layouts.");
+        if (!options.es && !options.vulkan_semantics && options.version < 440)
+            require_extension_internal("GL_ARB_enhanced_layouts");
+
+        set_extended_decoration(type.self, SPIRVCrossDecorationPacked);
+        return "std430";
+    }
+    else
+    {
+        SPIRV_CROSS_THROW("Buffer block cannot be expressed as neither std430 nor std140, even with enhanced "
+                          "layouts. You can try flattening this block to support a more flexible layout.");
+    }
+}
+
 void CompilerGLSL::emit_push_constant_block(const SPIRVariable &var)
 {
     if (flattened_buffer_blocks.count(var.self))
@@ -1544,6 +1602,81 @@ void CompilerGLSL::emit_buffer_block_legacy(const SPIRVariable &var)
     statement("");
 }
 
+void CompilerGLSL::emit_buffer_reference_block(SPIRType &type, bool forward_declaration)
+{
+    string buffer_name;
+
+    if (forward_declaration)
+    {
+        // Block names should never alias, but from HLSL input they kind of can because block types are reused for UAVs ...
+        // Allow aliased name since we might be declaring the block twice. Once with buffer reference (forward declared) and one proper declaration.
+        // The names must match up.
+        buffer_name = to_name(type.self, false);
+
+        // Shaders never use the block by interface name, so we don't
+        // have to track this other than updating name caches.
+        // If we have a collision for any reason, just fallback immediately.
+        if (ir.meta[type.self].decoration.alias.empty() ||
+            block_ssbo_names.find(buffer_name) != end(block_ssbo_names) ||
+            resource_names.find(buffer_name) != end(resource_names))
+        {
+            buffer_name = join("_", type.self);
+        }
+
+        // Make sure we get something unique for both global name scope and block name scope.
+        // See GLSL 4.5 spec: section 4.3.9 for details.
+        add_variable(block_ssbo_names, resource_names, buffer_name);
+
+        // If for some reason buffer_name is an illegal name, make a final fallback to a workaround name.
+        // This cannot conflict with anything else, so we're safe now.
+        // We cannot reuse this fallback name in neither global scope (blocked by block_names) nor block name scope.
+        if (buffer_name.empty())
+            buffer_name = join("_", type.self);
+
+        block_names.insert(buffer_name);
+        block_ssbo_names.insert(buffer_name);
+    }
+    else if (type.basetype != SPIRType::Struct)
+        buffer_name = type_to_glsl(type);
+    else
+        buffer_name = to_name(type.self, false);
+
+    if (!forward_declaration)
+    {
+        if (type.basetype == SPIRType::Struct)
+            statement("layout(buffer_reference, ", buffer_to_packing_standard(type), ") buffer ", buffer_name);
+        else
+            statement("layout(buffer_reference) buffer ", buffer_name);
+
+        begin_scope();
+
+        if (type.basetype == SPIRType::Struct)
+        {
+            type.member_name_cache.clear();
+
+            uint32_t i = 0;
+            for (auto &member : type.member_types)
+            {
+                add_member_name(type, i);
+                emit_struct_member(type, member, i);
+                i++;
+            }
+        }
+        else
+        {
+            auto &pointee_type = get_pointee_type(type);
+            statement(type_to_glsl(pointee_type), " value", type_to_array_glsl(pointee_type), ";");
+        }
+
+        end_scope_decl();
+        statement("");
+    }
+    else
+    {
+        statement("layout(buffer_reference) buffer ", buffer_name, ";");
+    }
+}
+
 void CompilerGLSL::emit_buffer_block_native(const SPIRVariable &var)
 {
     auto &type = get<SPIRType>(var.basetype);
@@ -1629,7 +1762,7 @@ void CompilerGLSL::emit_buffer_block_flattened(const SPIRVariable &var)
         SPIRV_CROSS_THROW("Basic types in a flattened UBO must be float, int or uint.");
 
         auto flags = ir.get_buffer_block_flags(var);
-        statement("uniform ", flags_to_precision_qualifiers_glsl(tmp, flags), type_to_glsl(tmp), " ", buffer_name, "[",
+        statement("uniform ", flags_to_qualifiers_glsl(tmp, flags), type_to_glsl(tmp), " ", buffer_name, "[",
                   buffer_size, "];");
     }
     else
@@ -2333,6 +2466,36 @@ void CompilerGLSL::emit_resources()
 
     emitted = false;
 
+    if (ir.addressing_model == AddressingModelPhysicalStorageBuffer64EXT)
+    {
+        for (auto type : physical_storage_non_block_pointer_types)
+        {
+            emit_buffer_reference_block(get<SPIRType>(type), false);
+        }
+
+        // Output buffer reference blocks.
+        // Do this in two stages, one with forward declaration,
+        // and one without. Buffer reference blocks can reference themselves
+        // to support things like linked lists.
+        ir.for_each_typed_id<SPIRType>([&](uint32_t, SPIRType &type) {
+            bool has_block_flags = has_decoration(type.self, DecorationBlock);
+            if (has_block_flags && type.pointer && type.pointer_depth == 1 && !type_is_array_of_pointers(type) &&
+                type.storage == StorageClassPhysicalStorageBufferEXT)
+            {
+                emit_buffer_reference_block(type, true);
+            }
+        });
+
+        ir.for_each_typed_id<SPIRType>([&](uint32_t, SPIRType &type) {
+            bool has_block_flags = has_decoration(type.self, DecorationBlock);
+            if (has_block_flags && type.pointer && type.pointer_depth == 1 && !type_is_array_of_pointers(type) &&
+                type.storage == StorageClassPhysicalStorageBufferEXT)
+            {
+                emit_buffer_reference_block(type, false);
+            }
+        });
+    }
+
     // Output UBOs and SSBOs
     ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
         auto &type = this->get<SPIRType>(var.basetype);
@@ -2534,15 +2697,22 @@ string CompilerGLSL::enclose_expression(const string &expr)
     return expr;
 }
 
-string CompilerGLSL::dereference_expression(const std::string &expr)
+string CompilerGLSL::dereference_expression(const SPIRType &expression_type, const std::string &expr)
 {
     // If this expression starts with an address-of operator ('&'), then
     // just return the part after the operator.
     // TODO: Strip parens if unnecessary?
     if (expr.front() == '&')
         return expr.substr(1);
-    else
+    else if (backend.native_pointers)
         return join('*', expr);
+    else if (expression_type.storage == StorageClassPhysicalStorageBufferEXT &&
+             expression_type.basetype != SPIRType::Struct && expression_type.pointer_depth == 1)
+    {
+        return join(enclose_expression(expr), ".value");
+    }
+    else
+        return expr;
 }
 
 string CompilerGLSL::address_of_expression(const std::string &expr)
@@ -2590,7 +2760,7 @@ string CompilerGLSL::to_dereferenced_expression(uint32_t id, bool register_expre
 {
     auto &type = expression_type(id);
     if (type.pointer && should_dereference(id))
-        return dereference_expression(to_enclosed_expression(id, register_expression_read));
+        return dereference_expression(type, to_enclosed_expression(id, register_expression_read));
     else
         return to_expression(id, register_expression_read);
 }
@@ -3629,7 +3799,7 @@ void CompilerGLSL::emit_uninitialized_temporary(uint32_t result_type, uint32_t r
 
         // The result_id has not been made into an expression yet, so use flags interface.
         add_local_variable_name(result_id);
-        statement(flags_to_precision_qualifiers_glsl(type, flags), variable_decl(type, to_name(result_id)), ";");
+        statement(flags_to_qualifiers_glsl(type, flags), variable_decl(type, to_name(result_id)), ";");
     }
 }
 
@@ -3664,7 +3834,7 @@ string CompilerGLSL::declare_temporary(uint32_t result_type, uint32_t result_id)
     {
         // The result_id has not been made into an expression yet, so use flags interface.
         add_local_variable_name(result_id);
-        return join(flags_to_precision_qualifiers_glsl(type, flags), variable_decl(type, to_name(result_id)), " = ");
+        return join(flags_to_qualifiers_glsl(type, flags), variable_decl(type, to_name(result_id)), " = ");
     }
 }
 
@@ -5933,6 +6103,21 @@ string CompilerGLSL::access_chain_internal(uint32_t base, const uint32_t *indice
     // Start traversing type hierarchy at the proper non-pointer types,
     // but keep type_id referencing the original pointer for use below.
     uint32_t type_id = expression_type_id(base);
+
+    if (!backend.native_pointers)
+    {
+        if (ptr_chain)
+            SPIRV_CROSS_THROW("Backend does not support native pointers and does not support OpPtrAccessChain.");
+
+        // Wrapped buffer reference pointer types will need to poke into the internal "value" member before
+        // continuing the access chain.
+        if (should_dereference(base))
+        {
+            auto &type = get<SPIRType>(type_id);
+            expr = dereference_expression(type, expr);
+        }
+    }
+
     const auto *type = &get_pointee_type(type_id);
 
     bool access_chain_is_arrayed = expr.find_first_of('[') != string::npos;
@@ -6780,8 +6965,7 @@ void CompilerGLSL::flush_variable_declaration(uint32_t id)
     {
         auto &type = get<SPIRType>(var->basetype);
         auto &flags = ir.meta[id].decoration.decoration_flags;
-        statement(flags_to_precision_qualifiers_glsl(type, flags), variable_decl(type, join("_", id, "_copy")),
-                  ";");
+        statement(flags_to_qualifiers_glsl(type, flags), variable_decl(type, join("_", id, "_copy")), ";");
     }
     var->deferred_declaration = false;
 }
@@ -9285,6 +9469,29 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
         statement("executeCallableNV(", to_expression(ops[0]), ", ", to_expression(ops[1]), ");");
         break;
 
+    case OpConvertUToPtr:
+    {
+        auto &type = get<SPIRType>(ops[0]);
+        if (type.storage != StorageClassPhysicalStorageBufferEXT)
+            SPIRV_CROSS_THROW("Only StorageClassPhysicalStorageBufferEXT is supported by OpConvertUToPtr.");
+
+        auto op = type_to_glsl(type);
+        emit_unary_func_op(ops[0], ops[1], ops[2], op.c_str());
+        break;
+    }
+
+    case OpConvertPtrToU:
+    {
+        auto &type = get<SPIRType>(ops[0]);
+        auto &ptr_type = expression_type(ops[2]);
+        if (ptr_type.storage != StorageClassPhysicalStorageBufferEXT)
+            SPIRV_CROSS_THROW("Only StorageClassPhysicalStorageBufferEXT is supported by OpConvertPtrToU.");
+
+        auto op = type_to_glsl(type);
+        emit_unary_func_op(ops[0], ops[1], ops[2], op.c_str());
+        break;
+    }
+
     case OpUndef:
         // Undefined value has been declared.
         break;
@@ -9442,13 +9649,16 @@ void CompilerGLSL::emit_struct_member(const SPIRType &type, uint32_t member_type
     if (is_block)
         qualifiers = to_interpolation_qualifiers(memberflags);
 
-    statement(layout_for_member(type, index), qualifiers, qualifier,
-              flags_to_precision_qualifiers_glsl(membertype, memberflags),
+    statement(layout_for_member(type, index), qualifiers, qualifier, flags_to_qualifiers_glsl(membertype, memberflags),
               variable_decl(membertype, to_member_name(type, index)), ";");
 }
 
-const char *CompilerGLSL::flags_to_precision_qualifiers_glsl(const SPIRType &type, const Bitset &flags)
+const char *CompilerGLSL::flags_to_qualifiers_glsl(const SPIRType &type, const Bitset &flags)
 {
+    // GL_EXT_buffer_reference variables can be marked as restrict.
+    if (flags.get(DecorationRestrictPointerEXT))
+        return "restrict ";
+
     // Structs do not have precision qualifiers, neither do doubles (desktop only anyways, so no mediump/highp).
     if (type.basetype != SPIRType::Float && type.basetype != SPIRType::Int && type.basetype != SPIRType::UInt &&
         type.basetype != SPIRType::Image && type.basetype != SPIRType::SampledImage &&
@@ -9501,7 +9711,7 @@ const char *CompilerGLSL::flags_to_precision_qualifiers_glsl(const SPIRType &typ
 
 const char *CompilerGLSL::to_precision_qualifiers_glsl(uint32_t id)
 {
-    return flags_to_precision_qualifiers_glsl(expression_type(id), ir.meta[id].decoration.decoration_flags);
+    return flags_to_qualifiers_glsl(expression_type(id), ir.meta[id].decoration.decoration_flags);
 }
 
 string CompilerGLSL::to_qualifiers_glsl(uint32_t id)
@@ -9664,6 +9874,12 @@ string CompilerGLSL::to_array_size(const SPIRType &type, uint32_t index)
 
 string CompilerGLSL::type_to_array_glsl(const SPIRType &type)
 {
+    if (type.pointer && type.storage == StorageClassPhysicalStorageBufferEXT && type.basetype != SPIRType::Struct)
+    {
+        // We are using a wrapped pointer type, and we should not emit any array declarations here.
+        return "";
+    }
+
     if (type.array.empty())
         return "";
 
@@ -9817,7 +10033,20 @@ string CompilerGLSL::type_to_glsl_constructor(const SPIRType &type)
 // depend on a specific object's use of that type.
 string CompilerGLSL::type_to_glsl(const SPIRType &type, uint32_t id)
 {
-    // Ignore the pointer type since GLSL doesn't have pointers.
+    if (type.pointer && type.storage == StorageClassPhysicalStorageBufferEXT && type.basetype != SPIRType::Struct)
+    {
+        // Need to create a magic type name which compacts the entire type information.
+        string name = type_to_glsl(get_pointee_type(type));
+        for (size_t i = 0; i < type.array.size(); i++)
+        {
+            if (type.array_size_literal[i])
+                name += join(type.array[i], "_");
+            else
+                name += join("id", type.array[i], "_");
+        }
+        name += "Pointer";
+        return name;
+    }
 
     switch (type.basetype)
     {
@@ -10124,7 +10353,7 @@ void CompilerGLSL::emit_function_prototype(SPIRFunction &func, const Bitset &ret
     string decl;
 
     auto &type = get<SPIRType>(func.return_type);
-    decl += flags_to_precision_qualifiers_glsl(type, return_flags);
+    decl += flags_to_qualifiers_glsl(type, return_flags);
     decl += type_to_glsl(type);
     decl += type_to_array_glsl(type);
     decl += " ";
@@ -10931,7 +11160,7 @@ void CompilerGLSL::emit_hoisted_temporaries(SmallVector<pair<uint32_t, uint32_t>
         add_local_variable_name(tmp.second);
         auto &flags = ir.meta[tmp.second].decoration.decoration_flags;
         auto &type = get<SPIRType>(tmp.first);
-        statement(flags_to_precision_qualifiers_glsl(type, flags), variable_decl(type, to_name(tmp.second)), ";");
+        statement(flags_to_qualifiers_glsl(type, flags), variable_decl(type, to_name(tmp.second)), ";");
 
         hoisted_temporaries.insert(tmp.second);
         forced_temporaries.insert(tmp.second);
@@ -392,11 +392,13 @@ protected:
         bool supports_empty_struct = false;
         bool array_is_value_type = true;
         bool comparison_image_samples_scalar = false;
+        bool native_pointers = false;
     } backend;
 
     void emit_struct(SPIRType &type);
     void emit_resources();
     void emit_buffer_block_native(const SPIRVariable &var);
+    void emit_buffer_reference_block(SPIRType &type, bool forward_declaration);
     void emit_buffer_block_legacy(const SPIRVariable &var);
     void emit_buffer_block_flattened(const SPIRVariable &type);
     void emit_declared_builtin_block(spv::StorageClass storage, spv::ExecutionModel model);
@@ -495,7 +497,7 @@ protected:
     std::string to_enclosed_pointer_expression(uint32_t id, bool register_expression_read = true);
    std::string to_extract_component_expression(uint32_t id, uint32_t index);
     std::string enclose_expression(const std::string &expr);
-    std::string dereference_expression(const std::string &expr);
+    std::string dereference_expression(const SPIRType &expression_type, const std::string &expr);
     std::string address_of_expression(const std::string &expr);
     void strip_enclosed_expression(std::string &expr);
     std::string to_member_name(const SPIRType &type, uint32_t index);
@@ -505,7 +507,7 @@ protected:
     virtual std::string to_qualifiers_glsl(uint32_t id);
     const char *to_precision_qualifiers_glsl(uint32_t id);
     virtual const char *to_storage_qualifiers_glsl(const SPIRVariable &var);
-    const char *flags_to_precision_qualifiers_glsl(const SPIRType &type, const Bitset &flags);
+    const char *flags_to_qualifiers_glsl(const SPIRType &type, const Bitset &flags);
     const char *format_to_glsl(spv::ImageFormat format);
     virtual std::string layout_for_member(const SPIRType &type, uint32_t index);
     virtual std::string to_interpolation_qualifiers(const Bitset &flags);
@@ -518,6 +520,8 @@ protected:
 
     bool buffer_is_packing_standard(const SPIRType &type, BufferPackingStandard packing, uint32_t start_offset = 0,
                                     uint32_t end_offset = ~(0u));
+    std::string buffer_to_packing_standard(const SPIRType &type);
+
     uint32_t type_to_packed_base_size(const SPIRType &type, BufferPackingStandard packing);
     uint32_t type_to_packed_alignment(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing);
     uint32_t type_to_packed_array_stride(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing);
@@ -2006,7 +2006,7 @@ void CompilerHLSL::emit_function_prototype(SPIRFunction &func, const Bitset &ret
     auto &type = get<SPIRType>(func.return_type);
     if (type.array.empty())
     {
-        decl += flags_to_precision_qualifiers_glsl(type, return_flags);
+        decl += flags_to_qualifiers_glsl(type, return_flags);
         decl += type_to_glsl(type);
         decl += " ";
     }
@@ -584,6 +584,7 @@ string CompilerMSL::compile()
     backend.allow_truncated_access_chain = true;
     backend.array_is_value_type = false;
     backend.comparison_image_samples_scalar = true;
+    backend.native_pointers = true;
 
     capture_output_to_buffer = msl_options.capture_output_to_buffer;
     is_rasterization_disabled = msl_options.disable_rasterization || capture_output_to_buffer;
@@ -158,7 +158,6 @@ void Parser::parse(const Instruction &instruction)
 
     switch (op)
     {
-    case OpMemoryModel:
     case OpSourceContinued:
     case OpSourceExtension:
     case OpNop:
@@ -168,6 +167,11 @@ void Parser::parse(const Instruction &instruction)
     case OpModuleProcessed:
         break;
 
+    case OpMemoryModel:
+        ir.addressing_model = static_cast<AddressingModel>(ops[0]);
+        ir.memory_model = static_cast<MemoryModel>(ops[1]);
+        break;
+
     case OpSource:
     {
         auto lang = static_cast<SourceLanguage>(ops[0]);
@@ -598,6 +602,20 @@ void Parser::parse(const Instruction &instruction)
         break;
     }
 
+    case OpTypeForwardPointer:
+    {
+        uint32_t id = ops[0];
+        auto &ptrbase = set<SPIRType>(id);
+        ptrbase.pointer = true;
+        ptrbase.pointer_depth++;
+        ptrbase.storage = static_cast<StorageClass>(ops[1]);
+
+        if (ptrbase.storage == StorageClassAtomicCounter)
+            ptrbase.basetype = SPIRType::AtomicCounter;
+
+        break;
+    }
+
     case OpTypeStruct:
     {
         uint32_t id = ops[0];