Add support for GL_EXT_buffer_reference2

Jeff Bolz 2019-03-08 10:42:49 -06:00
parent 86c72c9486
commit 758c93364c
12 changed files with 553 additions and 61 deletions
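In brief, GL_EXT_buffer_reference2 layers pointer-style arithmetic onto GL_EXT_buffer_reference: a reference plus or minus an integer advances by whole referenced blocks, two references can be subtracted to yield an element count, compound assignment with an integer works, and [] applied to a non-array reference indexes like a pointer. A rough usage sketch (illustrative only; the Node block, its member, and the literal address are invented here, and the actual tests added by this commit appear below):

#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int64 : enable
#extension GL_EXT_buffer_reference2 : enable
layout(buffer_reference, buffer_reference_align = 8) buffer Node {
    int value;
};
void main()
{
    Node n = Node(uint64_t(0x1000));
    n[3].value = 1;            // index a reference like an array
    (n + 5).value = 2;         // pointer-style addition, in units of whole Node blocks
    n += 1;                    // compound assignment on a reference
    int64_t d = (n + 4) - n;   // difference of two references, yields an element count (4)
}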


@@ -0,0 +1,274 @@
spv.bufferhandle18.frag
// Module Version 10000
// Generated by (magic number): 80007
// Id's are bound by 196
Capability Shader
Capability Int64
Capability CapabilityPhysicalStorageBufferAddressesEXT
Extension "SPV_EXT_physical_storage_buffer"
1: ExtInstImport "GLSL.std.450"
MemoryModel PhysicalStorageBuffer64EXT GLSL450
EntryPoint Fragment 4 "main"
ExecutionMode 4 OriginUpperLeft
Source GLSL 450
SourceExtension "GL_EXT_buffer_reference"
SourceExtension "GL_EXT_buffer_reference2"
SourceExtension "GL_EXT_scalar_block_layout"
SourceExtension "GL_EXT_shader_explicit_arithmetic_types_int64"
Name 4 "main"
Name 9 "T1"
MemberName 9(T1) 0 "x"
MemberName 9(T1) 1 "y"
Name 11 "a"
Name 15 "b"
Name 18 "c"
Name 24 "d"
Name 26 "e"
Name 29 "f"
Name 59 "arr"
Name 71 "i"
Name 130 "j"
Name 153 "k"
Name 160 "x"
Name 171 "y"
Name 178 "T2"
MemberName 178(T2) 0 "x"
Name 180 "m"
Name 190 "x"
Name 195 "buf"
MemberDecorate 9(T1) 0 Offset 0
MemberDecorate 9(T1) 1 Offset 4
Decorate 9(T1) Block
Decorate 11(a) DecorationAliasedPointerEXT
Decorate 15(b) DecorationAliasedPointerEXT
Decorate 18(c) DecorationAliasedPointerEXT
Decorate 24(d) DecorationAliasedPointerEXT
Decorate 26(e) DecorationAliasedPointerEXT
Decorate 29(f) DecorationAliasedPointerEXT
Decorate 59(arr) DecorationAliasedPointerEXT
Decorate 130(j) DecorationAliasedPointerEXT
Decorate 153(k) DecorationAliasedPointerEXT
MemberDecorate 178(T2) 0 Offset 0
Decorate 178(T2) Block
Decorate 180(m) DecorationAliasedPointerEXT
2: TypeVoid
3: TypeFunction 2
TypeForwardPointer 6 PhysicalStorageBufferEXT
7: TypeInt 32 1
8: TypeInt 32 0
9(T1): TypeStruct 7(int) 8(int)
6: TypePointer PhysicalStorageBufferEXT 9(T1)
10: TypePointer Function 6(ptr)
12: TypeInt 64 0
13: 12(int64_t) Constant 4 0
16: 12(int64_t) Constant 5 0
19: TypeBool
20: 19(bool) ConstantTrue
27: 12(int64_t) Constant 6 0
31: 7(int) Constant 1
32: TypePointer PhysicalStorageBufferEXT 8(int)
35: 8(int) Constant 0
37: 12(int64_t) Constant 8 0
39: 12(int64_t) Constant 9 0
44: 12(int64_t) Constant 24 0
47: 7(int) Constant 0
48: TypePointer PhysicalStorageBufferEXT 7(int)
52: 12(int64_t) Constant 40 0
56: 8(int) Constant 2
57: TypeArray 6(ptr) 56
58: TypePointer Function 57
66: 12(int64_t) Constant 56 0
70: TypePointer Function 7(int)
77: TypeInt 64 1
137: 12(int64_t) Constant 16 0
146: 12(int64_t) Constant 32 0
156: 12(int64_t) Constant 48 0
159: TypePointer Function 77(int64_t)
168: 77(int64_t) Constant 8 0
170: TypePointer Function 12(int64_t)
174: 12(int64_t) Constant 4294967288 4294967295
TypeForwardPointer 177 PhysicalStorageBufferEXT
178(T2): TypeStruct 7(int)
177: TypePointer PhysicalStorageBufferEXT 178(T2)
179: TypePointer Function 177(ptr)
183: 12(int64_t) Constant 64 0
186: 7(int) Constant 3
187: 8(int) Constant 3
188: TypeArray 7(int) 187
189: TypePointer Private 188
190(x): 189(ptr) Variable Private
191: 12(int64_t) Constant 10 0
192: 8(int) Constant 8
193: TypeArray 8(int) 192
194: TypePointer Private 193
195(buf): 194(ptr) Variable Private
4(main): 2 Function None 3
5: Label
11(a): 10(ptr) Variable Function
15(b): 10(ptr) Variable Function
18(c): 10(ptr) Variable Function
24(d): 10(ptr) Variable Function
26(e): 10(ptr) Variable Function
29(f): 10(ptr) Variable Function
59(arr): 58(ptr) Variable Function
71(i): 70(ptr) Variable Function
130(j): 10(ptr) Variable Function
153(k): 10(ptr) Variable Function
160(x): 159(ptr) Variable Function
171(y): 170(ptr) Variable Function
180(m): 179(ptr) Variable Function
14: 6(ptr) Bitcast 13
Store 11(a) 14
17: 6(ptr) Bitcast 16
Store 15(b) 17
21: 6(ptr) Load 11(a)
22: 6(ptr) Load 15(b)
23: 6(ptr) Select 20 21 22
Store 18(c) 23
25: 6(ptr) Load 15(b)
Store 24(d) 25
28: 6(ptr) Bitcast 27
Store 26(e) 28
30: 6(ptr) Load 11(a)
33: 32(ptr) AccessChain 30 31
34: 8(int) Load 33 Aligned 4
36: 19(bool) INotEqual 34 35
38: 6(ptr) Bitcast 37
40: 6(ptr) Bitcast 39
41: 6(ptr) Select 36 38 40
Store 29(f) 41
42: 6(ptr) Load 29(f)
43: 12(int64_t) ConvertPtrToU 42
45: 12(int64_t) IAdd 43 44
46: 6(ptr) ConvertUToPtr 45
49: 48(ptr) AccessChain 46 47
Store 49 31 Aligned 8
50: 6(ptr) Load 29(f)
51: 12(int64_t) ConvertPtrToU 50
53: 12(int64_t) IAdd 51 52
54: 6(ptr) ConvertUToPtr 53
55: 48(ptr) AccessChain 54 47
Store 55 31 Aligned 8
60: 6(ptr) Load 11(a)
61: 6(ptr) Load 29(f)
62: 57 CompositeConstruct 60 61
Store 59(arr) 62
63: 10(ptr) AccessChain 59(arr) 31
64: 6(ptr) Load 63
65: 12(int64_t) ConvertPtrToU 64
67: 12(int64_t) IAdd 65 66
68: 6(ptr) ConvertUToPtr 67
69: 48(ptr) AccessChain 68 47
Store 69 31 Aligned 8
72: 7(int) Load 71(i)
73: 10(ptr) AccessChain 59(arr) 72
74: 6(ptr) Load 73
75: 12(int64_t) ConvertPtrToU 74
76: 7(int) Load 71(i)
78: 77(int64_t) SConvert 76
79: 12(int64_t) Bitcast 78
80: 12(int64_t) IMul 79 37
81: 12(int64_t) IAdd 75 80
82: 6(ptr) ConvertUToPtr 81
83: 48(ptr) AccessChain 82 47
Store 83 31 Aligned 8
84: 7(int) Load 71(i)
85: 10(ptr) AccessChain 59(arr) 84
86: 6(ptr) Load 85
87: 12(int64_t) ConvertPtrToU 86
88: 7(int) Load 71(i)
89: 77(int64_t) SConvert 88
90: 12(int64_t) Bitcast 89
91: 12(int64_t) IMul 90 37
92: 12(int64_t) IAdd 87 91
93: 6(ptr) ConvertUToPtr 92
94: 12(int64_t) ConvertPtrToU 93
95: 7(int) Load 71(i)
96: 77(int64_t) SConvert 95
97: 12(int64_t) Bitcast 96
98: 12(int64_t) IMul 97 37
99: 12(int64_t) IAdd 94 98
100: 6(ptr) ConvertUToPtr 99
101: 12(int64_t) ConvertPtrToU 100
102: 7(int) Load 71(i)
103: 77(int64_t) SConvert 102
104: 12(int64_t) Bitcast 103
105: 12(int64_t) IMul 104 37
106: 12(int64_t) IAdd 101 105
107: 6(ptr) ConvertUToPtr 106
108: 12(int64_t) ConvertPtrToU 107
109: 7(int) Load 71(i)
110: 77(int64_t) SConvert 109
111: 12(int64_t) Bitcast 110
112: 12(int64_t) IMul 111 37
113: 12(int64_t) IAdd 108 112
114: 6(ptr) ConvertUToPtr 113
115: 12(int64_t) ConvertPtrToU 114
116: 7(int) Load 71(i)
117: 77(int64_t) SConvert 116
118: 12(int64_t) Bitcast 117
119: 12(int64_t) IMul 118 37
120: 12(int64_t) IAdd 115 119
121: 6(ptr) ConvertUToPtr 120
122: 12(int64_t) ConvertPtrToU 121
123: 7(int) Load 71(i)
124: 77(int64_t) SConvert 123
125: 12(int64_t) Bitcast 124
126: 12(int64_t) IMul 125 37
127: 12(int64_t) IAdd 122 126
128: 6(ptr) ConvertUToPtr 127
129: 48(ptr) AccessChain 128 47
Store 129 31 Aligned 8
131: 6(ptr) Load 130(j)
132: 12(int64_t) ConvertPtrToU 131
133: 12(int64_t) IAdd 132 37
134: 6(ptr) ConvertUToPtr 133
Store 130(j) 134
135: 6(ptr) Load 130(j)
136: 12(int64_t) ConvertPtrToU 135
138: 12(int64_t) ISub 136 137
139: 6(ptr) ConvertUToPtr 138
Store 130(j) 139
140: 6(ptr) Load 130(j)
141: 12(int64_t) ConvertPtrToU 140
142: 12(int64_t) IAdd 141 44
143: 6(ptr) ConvertUToPtr 142
Store 130(j) 143
144: 6(ptr) Load 130(j)
145: 12(int64_t) ConvertPtrToU 144
147: 12(int64_t) ISub 145 146
148: 6(ptr) ConvertUToPtr 147
Store 130(j) 148
149: 6(ptr) Load 130(j)
150: 12(int64_t) ConvertPtrToU 149
151: 12(int64_t) IAdd 52 150
152: 6(ptr) ConvertUToPtr 151
Store 130(j) 152
154: 6(ptr) Load 130(j)
155: 12(int64_t) ConvertPtrToU 154
157: 12(int64_t) IAdd 155 156
158: 6(ptr) ConvertUToPtr 157
Store 153(k) 158
161: 6(ptr) Load 153(k)
162: 12(int64_t) ConvertPtrToU 161
163: 77(int64_t) Bitcast 162
164: 6(ptr) Load 130(j)
165: 12(int64_t) ConvertPtrToU 164
166: 77(int64_t) Bitcast 165
167: 77(int64_t) ISub 163 166
169: 77(int64_t) SDiv 167 168
Store 160(x) 169
Store 171(y) 37
172: 6(ptr) Load 153(k)
173: 12(int64_t) ConvertPtrToU 172
175: 12(int64_t) IAdd 173 174
176: 6(ptr) ConvertUToPtr 175
Store 153(k) 176
181: 177(ptr) Load 180(m)
182: 12(int64_t) ConvertPtrToU 181
184: 12(int64_t) IAdd 182 183
185: 177(ptr) ConvertUToPtr 184
Store 180(m) 185
Return
FunctionEnd


@@ -0,0 +1,17 @@
spv.bufferhandle19_Errors.frag
ERROR: 0:18: '+' : wrong operand types: no operation '+' exists that takes a left-hand operand of type ' temp reference' and a right operand of type ' const int' (or there is no acceptable conversion)
ERROR: 0:19: '-' : wrong operand types: no operation '-' exists that takes a left-hand operand of type ' temp reference' and a right operand of type ' const int' (or there is no acceptable conversion)
ERROR: 0:20: '+' : wrong operand types: no operation '+' exists that takes a left-hand operand of type ' const int' and a right operand of type ' temp reference' (or there is no acceptable conversion)
ERROR: 0:21: '-' : wrong operand types: no operation '-' exists that takes a left-hand operand of type ' temp reference' and a right operand of type ' temp reference' (or there is no acceptable conversion)
ERROR: 0:22: 'assign' : cannot convert from ' const int' to ' temp reference'
ERROR: 0:23: 'assign' : cannot convert from ' const int' to ' temp reference'
ERROR: 0:24: 'assign' : cannot convert from ' temp reference' to ' temp reference'
ERROR: 0:25: 'assign' : cannot convert from ' temp reference' to ' temp reference'
ERROR: 0:28: 'assign' : cannot convert from ' temp reference' to ' temp reference'
ERROR: 0:29: 'assign' : cannot convert from ' temp reference' to ' temp reference'
ERROR: 0:30: '+' : wrong operand types: no operation '+' exists that takes a left-hand operand of type ' temp reference' and a right operand of type ' temp reference' (or there is no acceptable conversion)
ERROR: 0:31: '-' : wrong operand types: no operation '-' exists that takes a left-hand operand of type ' const int' and a right operand of type ' temp reference' (or there is no acceptable conversion)
ERROR: 12 compilation errors. No code generated.
SPIR-V is not generated for failed compile or link


@@ -0,0 +1,59 @@
#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int64 : enable
#extension GL_EXT_buffer_reference2 : enable
#extension GL_EXT_scalar_block_layout : enable
layout(buffer_reference, buffer_reference_align = 8) buffer T1 {
    int x;
    bool y;
};
layout(buffer_reference, buffer_reference_align = 64) buffer T2 {
    int x;
};
const int s = int(uint64_t(T1(T2(uint64_t(3)))));
int x[s];
const uint64_t t = uint64_t(true ? T2(uint64_t(10)) : T2(uint64_t(11)));
#define sizeof(T) (uint64_t(T(uint64_t(0))+1))
const uint64_t s2 = sizeof(T1);
uint buf[int(s2)];
void main()
{
    T1 a = T1(uint64_t(4)), b = T1(uint64_t(5));
    T1 c = true ? a : b;
    T1 d = (a,b);
    T1 e = true ? T1(uint64_t(6)) : T1(uint64_t(7));
    T1 f = a.y ? T1(uint64_t(8)) : T1(uint64_t(9));
    f[3].x = 1;
    (f+5).x = 1;
    T1 arr[2] = {a, f};
    arr[1][7].x = 1;
    int i;
    arr[i][i].x = 1;
    // Since we don't distinguish between "pointer" and "reference" type,
    // a reference type can have [] applied to it repeatedly and it has
    // the effect of adding up the indices.
    arr[i][i][i][i][i][i][i].x = 1;
    T1 j;
    j = j+1;
    j = j-2;
    j += 3;
    j -= 4;
    j = 5+j;
    T1 k = j + 6;
    int64_t x = k - j;
    uint64_t y = sizeof(T1);
    k = k + (-1);
    T2 m;
    m = m+1;
}


@@ -0,0 +1,32 @@
#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int64 : enable
#extension GL_EXT_buffer_reference2 : enable
#extension GL_EXT_scalar_block_layout : enable
layout(buffer_reference) buffer T1 {
    int x[];
};
layout(buffer_reference) buffer T2 {
    int x[2];
};
void main()
{
    T1 a;
    a+1;
    a-1;
    1+a;
    a-a;
    a+=1;
    a-=1;
    a+=a;
    a-=a;
    T2 b;
    b+=b;
    b-=b;
    b+b;
    1-b;
}
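For contrast, the '+'/'-' errors on T1 above arise because its referenced block ends in an unsized array, so no stride can be computed; with a fixed-size referent the same forms compile, and only reference-plus-reference and integer-minus-reference stay invalid for every reference type (the T2 cases above). A rough positive counterpart, using a hypothetical block T3:

#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int64 : enable
#extension GL_EXT_buffer_reference2 : enable
layout(buffer_reference) buffer T3 {
    int x[2];
};
void main()
{
    T3 a;
    a = a + 1;          // fine: advances by one whole T3 block
    a -= 1;             // fine
    int64_t d = a - a;  // fine: reference difference
    // a + a;           // still an error: no '+' taking two references
    // 1 - a;           // still an error: no 'int - reference'
}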


@@ -119,6 +119,62 @@ TIntermTyped* TIntermediate::addBinaryMath(TOperator op, TIntermTyped* left, TIn
    if (left->getType().getBasicType() == EbtBlock || right->getType().getBasicType() == EbtBlock)
        return nullptr;
    // Convert "reference +/- int" and "reference - reference" to integer math
    if ((op == EOpAdd || op == EOpSub) && extensionRequested(E_GL_EXT_buffer_reference2)) {
        // No addressing math on struct with unsized array.
        if ((left->getBasicType() == EbtReference && left->getType().getReferentType()->containsUnsizedArray()) ||
            (right->getBasicType() == EbtReference && right->getType().getReferentType()->containsUnsizedArray())) {
            return nullptr;
        }
        if (left->getBasicType() == EbtReference && isTypeInt(right->getBasicType())) {
            const TType& referenceType = left->getType();
            TIntermConstantUnion* size = addConstantUnion((unsigned long long)computeBufferReferenceTypeSize(left->getType()), loc, true);
            left = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, left, TType(EbtUint64));
            right = createConversion(EbtInt64, right);
            right = addBinaryMath(EOpMul, right, size, loc);
            TIntermTyped *node = addBinaryMath(op, left, right, loc);
            node = addBuiltInFunctionCall(loc, EOpConvUint64ToPtr, true, node, referenceType);
            return node;
        }
        if (op == EOpAdd && right->getBasicType() == EbtReference && isTypeInt(left->getBasicType())) {
            const TType& referenceType = right->getType();
            TIntermConstantUnion* size = addConstantUnion((unsigned long long)computeBufferReferenceTypeSize(right->getType()), loc, true);
            right = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, right, TType(EbtUint64));
            left = createConversion(EbtInt64, left);
            left = addBinaryMath(EOpMul, left, size, loc);
            TIntermTyped *node = addBinaryMath(op, left, right, loc);
            node = addBuiltInFunctionCall(loc, EOpConvUint64ToPtr, true, node, referenceType);
            return node;
        }
        if (op == EOpSub && left->getBasicType() == EbtReference && right->getBasicType() == EbtReference) {
            TIntermConstantUnion* size = addConstantUnion((long long)computeBufferReferenceTypeSize(left->getType()), loc, true);
            left = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, left, TType(EbtUint64));
            right = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, right, TType(EbtUint64));
            left = addBuiltInFunctionCall(loc, EOpConvUint64ToInt64, true, left, TType(EbtInt64));
            right = addBuiltInFunctionCall(loc, EOpConvUint64ToInt64, true, right, TType(EbtInt64));
            left = addBinaryMath(EOpSub, left, right, loc);
            TIntermTyped *node = addBinaryMath(EOpDiv, left, size, loc);
            return node;
        }
        // No other math operators supported on references
        if (left->getBasicType() == EbtReference || right->getBasicType() == EbtReference) {
            return nullptr;
        }
    }
    // Try converting the children's base types to compatible types.
    auto children = addConversion(op, left, right);
    left = std::get<0>(children);
@@ -231,6 +287,26 @@ TIntermTyped* TIntermediate::addAssign(TOperator op, TIntermTyped* left, TInterm
    if (left->getType().getBasicType() == EbtBlock || right->getType().getBasicType() == EbtBlock)
        return nullptr;
    // Convert "reference += int" to "reference = reference + int". We need this because the
    // "reference + int" calculation involves a cast back to the original type, which makes it
    // not an lvalue.
    if ((op == EOpAddAssign || op == EOpSubAssign) && left->getBasicType() == EbtReference &&
        extensionRequested(E_GL_EXT_buffer_reference2)) {
        if (!(right->getType().isScalar() && right->getType().isIntegerDomain()))
            return nullptr;
        TIntermTyped* node = addBinaryMath(op == EOpAddAssign ? EOpAdd : EOpSub, left, right, loc);
        if (!node)
            return nullptr;
        TIntermSymbol* symbol = left->getAsSymbolNode();
        left = addSymbol(*symbol);
        node = addAssign(EOpAssign, left, node, loc);
        return node;
    }
    //
    // Like adding binary math, except the conversion can only go
    // from right to left.
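The net effect of the two hunks above: reference arithmetic is lowered to 64-bit address math. The reference is converted to uint64, the integer operand is widened and scaled by computeBufferReferenceTypeSize(), the add or subtract happens on integers, and the result is converted back to the reference type; reference minus reference divides the address difference by that same size, and += / -= are rewritten into plain assignment of the corresponding binary expression. A rough sketch of the effective transform for an 8-byte block (the pattern is visible as ConvertPtrToU / IAdd / ConvertUToPtr and SDiv in the spv.bufferhandle18.frag baseline above; the cast-style comments are only an approximation of the generated IR, and the block B is invented here):

#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int64 : enable
#extension GL_EXT_buffer_reference2 : enable
layout(buffer_reference, buffer_reference_align = 8) buffer B {
    int x;
    int y;
};
void main()
{
    B j = B(uint64_t(9)), k = B(uint64_t(17));
    j = j + 1;          // roughly: j = B(uint64_t(j) + 1 * 8)
    j -= 4;             // roughly: j = B(uint64_t(j) - 4 * 8)
    j = 5 + j;          // roughly: j = B(5 * 8 + uint64_t(j))
    int64_t d = k - j;  // roughly: d = (int64_t(uint64_t(k)) - int64_t(uint64_t(j))) / 8
}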


@@ -377,7 +377,8 @@ TIntermTyped* TParseContext::handleBracketDereference(const TSourceLoc& loc, TIn
    // basic type checks...
    variableCheck(base);
    if (! base->isArray() && ! base->isMatrix() && ! base->isVector() && ! base->getType().isCoopMat()) {
    if (! base->isArray() && ! base->isMatrix() && ! base->isVector() && ! base->getType().isCoopMat() &&
        base->getBasicType() != EbtReference) {
        if (base->getAsSymbolNode())
            error(loc, " left of '[' is not of type array, matrix, or vector ", base->getAsSymbolNode()->getName().c_str(), "");
        else
@@ -405,6 +406,14 @@ TIntermTyped* TParseContext::handleBracketDereference(const TSourceLoc& loc, TIn
    // at least one of base and index is not a front-end constant variable...
    TIntermTyped* result = nullptr;
    if (base->getBasicType() == EbtReference && ! base->isArray()) {
        requireExtensions(loc, 1, &E_GL_EXT_buffer_reference2, "buffer reference indexing");
        result = intermediate.addBinaryMath(EOpAdd, base, index, loc);
        result->setType(base->getType());
        return result;
    }
    if (index->getQualifier().isFrontEndConstant())
        checkIndex(loc, base->getType(), indexValue);
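This routes [] on a non-array reference through the same EOpAdd path as reference + int, so indexing a reference is scaled pointer arithmetic rather than a bounds-checked array access, and because the result keeps the reference type, repeated [] simply accumulates the offsets (the arr[i][i][i][i][i][i][i].x case in the new test). A small sketch of those semantics, with a hypothetical block Rec:

#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int64 : enable
#extension GL_EXT_buffer_reference2 : enable
layout(buffer_reference, buffer_reference_align = 8) buffer Rec {
    int x;
    int y;
};
void main()
{
    Rec p;
    int i = 1, j = 2;
    p[i].x = 1;       // behaves like (p + i).x = 1
    p[i][j].x = 1;    // repeated [] adds up: behaves like (p + i + j).x = 1
}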


@@ -209,6 +209,7 @@ void TParseVersions::initializeExtensionBehavior()
extensionBehavior[E_GL_EXT_scalar_block_layout] = EBhDisable;
extensionBehavior[E_GL_EXT_fragment_invocation_density] = EBhDisable;
extensionBehavior[E_GL_EXT_buffer_reference] = EBhDisable;
extensionBehavior[E_GL_EXT_buffer_reference2] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_16bit_storage] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_8bit_storage] = EBhDisable;
@@ -389,6 +390,7 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_EXT_scalar_block_layout 1\n"
"#define GL_EXT_fragment_invocation_density 1\n"
"#define GL_EXT_buffer_reference 1\n"
"#define GL_EXT_buffer_reference2 1\n"
// GL_KHR_shader_subgroup
"#define GL_KHR_shader_subgroup_basic 1\n"
@@ -808,6 +810,8 @@ void TParseVersions::updateExtensionBehavior(int line, const char* extension, co
else if (strcmp(extension, "GL_NV_shader_subgroup_partitioned") == 0)
updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
#endif
else if (strcmp(extension, "GL_EXT_buffer_reference2") == 0)
updateExtensionBehavior(line, "GL_EXT_buffer_reference", behaviorString);
}
void TParseVersions::updateExtensionBehavior(const char* extension, TExtensionBehavior behavior)

glslang/MachineIndependent/Versions.h Executable file → Normal file

@@ -171,6 +171,7 @@ const char* const E_GL_EXT_samplerless_texture_functions = "GL_EXT_samplerles
const char* const E_GL_EXT_scalar_block_layout = "GL_EXT_scalar_block_layout";
const char* const E_GL_EXT_fragment_invocation_density = "GL_EXT_fragment_invocation_density";
const char* const E_GL_EXT_buffer_reference = "GL_EXT_buffer_reference";
const char* const E_GL_EXT_buffer_reference2 = "GL_EXT_buffer_reference2";
// Arrays of extensions for the above viewportEXTs duplications

glslang/MachineIndependent/linkValidate.cpp Executable file → Normal file

@@ -1683,4 +1683,74 @@ int TIntermediate::getMemberAlignment(const TType& type, int& size, int& stride,
    }
}

// shared calculation by getOffset and getOffsets
void TIntermediate::updateOffset(const TType& parentType, const TType& memberType, int& offset, int& memberSize)
{
    int dummyStride;
    // modify just the children's view of matrix layout, if there is one for this member
    TLayoutMatrix subMatrixLayout = memberType.getQualifier().layoutMatrix;
    int memberAlignment = getMemberAlignment(memberType, memberSize, dummyStride,
                                             parentType.getQualifier().layoutPacking,
                                             subMatrixLayout != ElmNone
                                                 ? subMatrixLayout == ElmRowMajor
                                                 : parentType.getQualifier().layoutMatrix == ElmRowMajor);
    RoundToPow2(offset, memberAlignment);
}

// Lookup or calculate the offset of a block member, using the recursively
// defined block offset rules.
int TIntermediate::getOffset(const TType& type, int index)
{
    const TTypeList& memberList = *type.getStruct();
    // Don't calculate offset if one is present, it could be user supplied
    // and different than what would be calculated. That is, this is faster,
    // but not just an optimization.
    if (memberList[index].type->getQualifier().hasOffset())
        return memberList[index].type->getQualifier().layoutOffset;
    int memberSize = 0;
    int offset = 0;
    for (int m = 0; m <= index; ++m) {
        updateOffset(type, *memberList[m].type, offset, memberSize);
        if (m < index)
            offset += memberSize;
    }
    return offset;
}

// Calculate the block data size.
// Block arrayness is not taken into account, each element is backed by a separate buffer.
int TIntermediate::getBlockSize(const TType& blockType)
{
    const TTypeList& memberList = *blockType.getStruct();
    int lastIndex = (int)memberList.size() - 1;
    int lastOffset = getOffset(blockType, lastIndex);
    int lastMemberSize;
    int dummyStride;
    getMemberAlignment(*memberList[lastIndex].type, lastMemberSize, dummyStride,
                       blockType.getQualifier().layoutPacking,
                       blockType.getQualifier().layoutMatrix == ElmRowMajor);
    return lastOffset + lastMemberSize;
}

int TIntermediate::computeBufferReferenceTypeSize(const TType& type)
{
    assert(type.getBasicType() == EbtReference);
    int size = getBlockSize(*type.getReferentType());
    int align = type.getBufferReferenceAlignment();
    if (align) {
        size = (size + align - 1) & ~(align-1);
    }
    return size;
}

} // end namespace glslang
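As a cross-check against the new test (not part of the commit itself), the stride this computes matches the constants in the spv.bufferhandle18.frag baseline above:
T1 { int x; bool y; } with buffer_reference_align = 8: getBlockSize() = 8 (y sits at offset 4 with size 4), already a multiple of 8, so the T1 stride is 8, the Constant 8 (id 37) that the baseline multiplies indices by.
T2 { int x; } with buffer_reference_align = 64: getBlockSize() = 4, rounded up via (4 + 63) & ~63 = 64, the Constant 64 (id 183) added for m = m + 1.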


@@ -696,6 +696,10 @@ public:
    static int getScalarAlignment(const TType&, int& size, int& stride, bool rowMajor);
    static int getMemberAlignment(const TType&, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor);
    static bool improperStraddle(const TType& type, int size, int offset);
    static void updateOffset(const TType& parentType, const TType& memberType, int& offset, int& memberSize);
    static int getOffset(const TType& type, int index);
    static int getBlockSize(const TType& blockType);
    static int computeBufferReferenceTypeSize(const TType&);
    bool promote(TIntermOperator*);
#ifdef NV_EXTENSIONS


@@ -143,45 +143,6 @@ public:
}
}
// shared calculation by getOffset and getOffsets
void updateOffset(const TType& parentType, const TType& memberType, int& offset, int& memberSize)
{
int dummyStride;
// modify just the children's view of matrix layout, if there is one for this member
TLayoutMatrix subMatrixLayout = memberType.getQualifier().layoutMatrix;
int memberAlignment = intermediate.getMemberAlignment(memberType, memberSize, dummyStride,
parentType.getQualifier().layoutPacking,
subMatrixLayout != ElmNone
? subMatrixLayout == ElmRowMajor
: parentType.getQualifier().layoutMatrix == ElmRowMajor);
RoundToPow2(offset, memberAlignment);
}
// Lookup or calculate the offset of a block member, using the recursively
// defined block offset rules.
int getOffset(const TType& type, int index)
{
const TTypeList& memberList = *type.getStruct();
// Don't calculate offset if one is present, it could be user supplied
// and different than what would be calculated. That is, this is faster,
// but not just an optimization.
if (memberList[index].type->getQualifier().hasOffset())
return memberList[index].type->getQualifier().layoutOffset;
int memberSize = 0;
int offset = 0;
for (int m = 0; m <= index; ++m) {
updateOffset(type, *memberList[m].type, offset, memberSize);
if (m < index)
offset += memberSize;
}
return offset;
}
// Lookup or calculate the offset of all block members at once, using the recursively
// defined block offset rules.
void getOffsets(const TType& type, TVector<int>& offsets)
@@ -196,7 +157,7 @@ public:
offset = memberList[m].type->getQualifier().layoutOffset;
// calculate the offset of the next member and align the current offset to this member
updateOffset(type, *memberList[m].type, offset, memberSize);
intermediate.updateOffset(type, *memberList[m].type, offset, memberSize);
// save the offset of this member
offsets[m] = offset;
@@ -226,23 +187,6 @@ public:
return stride;
}
// Calculate the block data size.
// Block arrayness is not taken into account, each element is backed by a separate buffer.
int getBlockSize(const TType& blockType)
{
const TTypeList& memberList = *blockType.getStruct();
int lastIndex = (int)memberList.size() - 1;
int lastOffset = getOffset(blockType, lastIndex);
int lastMemberSize;
int dummyStride;
intermediate.getMemberAlignment(*memberList[lastIndex].type, lastMemberSize, dummyStride,
blockType.getQualifier().layoutPacking,
blockType.getQualifier().layoutMatrix == ElmRowMajor);
return lastOffset + lastMemberSize;
}
// count the total number of leaf members from iterating out of a block type
int countAggregateMembers(const TType& parentType)
{
@@ -349,7 +293,7 @@ public:
case EOpIndexDirectStruct:
index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
if (offset >= 0)
offset += getOffset(visitNode->getLeft()->getType(), index);
offset += intermediate.getOffset(visitNode->getLeft()->getType(), index);
if (name.size() > 0)
name.append(".");
name.append((*visitNode->getLeft()->getType().getStruct())[index].type->getFieldName());
@@ -592,10 +536,10 @@ public:
assert(! anonymous);
for (int e = 0; e < base->getType().getCumulativeArraySize(); ++e)
blockIndex = addBlockName(blockName + "[" + String(e) + "]", derefType,
getBlockSize(base->getType()));
intermediate.getBlockSize(base->getType()));
baseName.append(TString("[0]"));
} else
blockIndex = addBlockName(blockName, base->getType(), getBlockSize(base->getType()));
blockIndex = addBlockName(blockName, base->getType(), intermediate.getBlockSize(base->getType()));
if (reflection.options & EShReflectionAllBlockVariables) {
// Use a degenerate (empty) set of dereferences to immediately put as at the end of


@@ -274,6 +274,8 @@ INSTANTIATE_TEST_CASE_P(
"spv.bufferhandle15.frag",
"spv.bufferhandle16.frag",
"spv.bufferhandle17_Errors.frag",
"spv.bufferhandle18.frag",
"spv.bufferhandle19_Errors.frag",
"spv.bufferhandle2.frag",
"spv.bufferhandle3.frag",
"spv.bufferhandle4.frag",