@@ -1451,7 +1451,7 @@ uint32_t CompilerMSL::build_extended_vector_type(uint32_t type_id, uint32_t comp
 }
 
 void CompilerMSL::add_plain_variable_to_interface_block(StorageClass storage, const string &ib_var_ref,
-                                                        SPIRType &ib_type, SPIRVariable &var, bool strip_array)
+                                                        SPIRType &ib_type, SPIRVariable &var, InterfaceBlockMeta &meta)
 {
 	bool is_builtin = is_builtin_variable(var);
 	BuiltIn builtin = BuiltIn(get_decoration(var.self, DecorationBuiltIn));
@@ -1466,17 +1466,32 @@ void CompilerMSL::add_plain_variable_to_interface_block(StorageClass storage, co
 	var.basetype = type_id;
 
 	type_id = get_pointee_type_id(var.basetype);
-	if (strip_array && is_array(get<SPIRType>(type_id)))
+	if (meta.strip_array && is_array(get<SPIRType>(type_id)))
 		type_id = get<SPIRType>(type_id).parent_type;
 	auto &type = get<SPIRType>(type_id);
 	uint32_t target_components = 0;
 	uint32_t type_components = type.vecsize;
+
 	bool padded_output = false;
+	bool padded_input = false;
+	uint32_t start_component = 0;
+
+	auto &entry_func = get<SPIRFunction>(ir.default_entry_point);
+
+	// Deal with Component decorations.
+	InterfaceBlockMeta::LocationMeta *location_meta = nullptr;
+	if (has_decoration(var.self, DecorationLocation))
+	{
+		auto location_meta_itr = meta.location_meta.find(get_decoration(var.self, DecorationLocation));
+		if (location_meta_itr != end(meta.location_meta))
+			location_meta = &location_meta_itr->second;
+	}
 
 	// Check if we need to pad fragment output to match a certain number of components.
 	if (get_decoration_bitset(var.self).get(DecorationLocation) && msl_options.pad_fragment_output_components &&
 	    get_entry_point().model == ExecutionModelFragment && storage == StorageClassOutput)
 	{
+		assert(!location_meta);
 		uint32_t locn = get_decoration(var.self, DecorationLocation);
 		target_components = get_target_components_for_fragment_location(locn);
 		if (type_components < target_components)
@@ -1487,6 +1502,47 @@ void CompilerMSL::add_plain_variable_to_interface_block(StorageClass storage, co
 		}
 	}
 
+	if (location_meta)
+	{
+		start_component = get_decoration(var.self, DecorationComponent);
+		uint32_t num_components = location_meta->num_components;
+
+		if (location_meta->ib_index != ~0u)
+		{
+			// We have already declared the variable. Just emit an early-declared variable and fixup as needed.
+			entry_func.add_local_variable(var.self);
+			vars_needing_early_declaration.push_back(var.self);
+
+			if (var.storage == StorageClassInput)
+			{
+				uint32_t ib_index = location_meta->ib_index;
+				entry_func.fixup_hooks_in.push_back([=, &var]() {
+					statement(to_name(var.self), " = ", ib_var_ref, ".", to_member_name(ib_type, ib_index),
+					          vector_swizzle(type_components, start_component), ";");
+				});
+			}
+			else
+			{
+				uint32_t ib_index = location_meta->ib_index;
+				entry_func.fixup_hooks_out.push_back([=, &var]() {
+					statement(ib_var_ref, ".", to_member_name(ib_type, ib_index),
+					          vector_swizzle(type_components, start_component), " = ",
+					          to_name(var.self), ";");
+				});
+			}
+			return;
+		}
+		else
+		{
+			location_meta->ib_index = uint32_t(ib_type.member_types.size());
+			type_id = build_extended_vector_type(type_id, num_components);
+			if (var.storage == StorageClassInput)
+				padded_input = true;
+			else
+				padded_output = true;
+		}
+	}
+
 	ib_type.member_types.push_back(type_id);
 
 	// Give the member a name
@@ -1495,25 +1551,45 @@ void CompilerMSL::add_plain_variable_to_interface_block(StorageClass storage, co
 
 	// Update the original variable reference to include the structure reference
 	string qual_var_name = ib_var_ref + "." + mbr_name;
-	auto &entry_func = get<SPIRFunction>(ir.default_entry_point);
 
-	if (padded_output)
+	if (padded_output || padded_input)
 	{
 		entry_func.add_local_variable(var.self);
 		vars_needing_early_declaration.push_back(var.self);
 
-		entry_func.fixup_hooks_out.push_back([=, &var]() {
-			SPIRType &padded_type = this->get<SPIRType>(type_id);
-			statement(qual_var_name, " = ", remap_swizzle(padded_type, type_components, to_name(var.self)), ";");
-		});
+		if (padded_output)
+		{
+			entry_func.fixup_hooks_out.push_back([=, &var]()
+			{
+				statement(qual_var_name,
+				          vector_swizzle(type_components, start_component), " = ",
+				          to_name(var.self), ";");
+			});
+		}
+		else
+		{
+			entry_func.fixup_hooks_in.push_back([=, &var]() {
+				statement(to_name(var.self), " = ", qual_var_name, vector_swizzle(type_components, start_component), ";");
+			});
+		}
 	}
-	else if (!strip_array)
+	else if (!meta.strip_array)
 		ir.meta[var.self].decoration.qualified_alias = qual_var_name;
 
 	if (var.storage == StorageClassOutput && var.initializer != ID(0))
 	{
-		entry_func.fixup_hooks_in.push_back(
-		    [=, &var]() { statement(qual_var_name, " = ", to_expression(var.initializer), ";"); });
+		if (padded_output || padded_input)
+		{
+			entry_func.fixup_hooks_in.push_back(
+			    [=, &var]()
+			    { statement(to_name(var.self), " = ", to_expression(var.initializer), ";"); });
+		}
+		else
+		{
+			entry_func.fixup_hooks_in.push_back(
+			    [=, &var]()
+			    { statement(qual_var_name, " = ", to_expression(var.initializer), ";"); });
+		}
 	}
 
 	// Copy the variable location from the original variable to the member
@@ -1522,10 +1598,14 @@ void CompilerMSL::add_plain_variable_to_interface_block(StorageClass storage, co
 		uint32_t locn = get_decoration(var.self, DecorationLocation);
 		if (storage == StorageClassInput && (get_execution_model() == ExecutionModelVertex || is_tessellation_shader()))
 		{
-			type_id = ensure_correct_attribute_type(var.basetype, locn);
-			var.basetype = type_id;
+			type_id = ensure_correct_attribute_type(var.basetype, locn,
+			                                        location_meta ? location_meta->num_components : type.vecsize);
+
+			if (!location_meta)
+				var.basetype = type_id;
+
 			type_id = get_pointee_type_id(type_id);
-			if (strip_array && is_array(get<SPIRType>(type_id)))
+			if (meta.strip_array && is_array(get<SPIRType>(type_id)))
 				type_id = get<SPIRType>(type_id).parent_type;
 			ib_type.member_types[ib_mbr_idx] = type_id;
 		}
@@ -1539,10 +1619,13 @@ void CompilerMSL::add_plain_variable_to_interface_block(StorageClass storage, co
 		mark_location_as_used_by_shader(locn, storage);
 	}
 
-	if (get_decoration_bitset(var.self).get(DecorationComponent))
+	if (!location_meta)
 	{
-		uint32_t comp = get_decoration(var.self, DecorationComponent);
-		set_member_decoration(ib_type.self, ib_mbr_idx, DecorationComponent, comp);
+		if (get_decoration_bitset(var.self).get(DecorationComponent))
+		{
+			uint32_t component = get_decoration(var.self, DecorationComponent);
+			set_member_decoration(ib_type.self, ib_mbr_idx, DecorationComponent, component);
+		}
 	}
 
 	if (get_decoration_bitset(var.self).get(DecorationIndex))
@@ -1573,10 +1656,10 @@ void CompilerMSL::add_plain_variable_to_interface_block(StorageClass storage, co
 }
 
 void CompilerMSL::add_composite_variable_to_interface_block(StorageClass storage, const string &ib_var_ref,
-                                                            SPIRType &ib_type, SPIRVariable &var, bool strip_array)
+                                                            SPIRType &ib_type, SPIRVariable &var, InterfaceBlockMeta &meta)
 {
 	auto &entry_func = get<SPIRFunction>(ir.default_entry_point);
-	auto &var_type = strip_array ? get_variable_element_type(var) : get_variable_data_type(var);
+	auto &var_type = meta.strip_array ? get_variable_element_type(var) : get_variable_data_type(var);
 	uint32_t elem_cnt = 0;
 
 	if (is_matrix(var_type))
@@ -1625,7 +1708,7 @@ void CompilerMSL::add_composite_variable_to_interface_block(StorageClass storage
 		// not from a function variable.
 		flatten_from_ib_var = true;
 	}
-	else if (!strip_array)
+	else if (!meta.strip_array)
 	{
 		// Only flatten/unflatten IO composites for non-tessellation cases where arrays are not stripped.
 		entry_func.add_local_variable(var.self);
@@ -1708,7 +1791,7 @@ void CompilerMSL::add_composite_variable_to_interface_block(StorageClass storage
 	set_extended_member_decoration(ib_type.self, ib_mbr_idx, SPIRVCrossDecorationInterfaceOrigID, var.self);
 
 	// Only flatten/unflatten IO composites for non-tessellation cases where arrays are not stripped.
-	if (!strip_array)
+	if (!meta.strip_array)
 	{
 		switch (storage)
 		{
@@ -1771,10 +1854,10 @@ uint32_t CompilerMSL::get_accumulated_member_location(const SPIRVariable &var, u
 
 void CompilerMSL::add_composite_member_variable_to_interface_block(StorageClass storage, const string &ib_var_ref,
                                                                    SPIRType &ib_type, SPIRVariable &var,
-                                                                   uint32_t mbr_idx, bool strip_array)
+                                                                   uint32_t mbr_idx, InterfaceBlockMeta &meta)
 {
 	auto &entry_func = get<SPIRFunction>(ir.default_entry_point);
-	auto &var_type = strip_array ? get_variable_element_type(var) : get_variable_data_type(var);
+	auto &var_type = meta.strip_array ? get_variable_element_type(var) : get_variable_data_type(var);
 
 	BuiltIn builtin = BuiltInMax;
 	bool is_builtin = is_member_builtin(var_type, mbr_idx, &builtin);
@@ -1845,7 +1928,7 @@ void CompilerMSL::add_composite_member_variable_to_interface_block(StorageClass
 		}
 		else if (has_decoration(var.self, DecorationLocation))
 		{
-			uint32_t locn = get_accumulated_member_location(var, mbr_idx, strip_array) + i;
+			uint32_t locn = get_accumulated_member_location(var, mbr_idx, meta.strip_array) + i;
 			set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn);
 			mark_location_as_used_by_shader(locn, storage);
 		}
@@ -1879,7 +1962,7 @@ void CompilerMSL::add_composite_member_variable_to_interface_block(StorageClass
 	set_extended_member_decoration(ib_type.self, ib_mbr_idx, SPIRVCrossDecorationInterfaceMemberIndex, mbr_idx);
 
 	// Unflatten or flatten from [[stage_in]] or [[stage_out]] as appropriate.
-	if (!strip_array)
+	if (!meta.strip_array)
 	{
 		switch (storage)
 		{
@@ -1914,9 +1997,9 @@ void CompilerMSL::add_composite_member_variable_to_interface_block(StorageClass
 
 void CompilerMSL::add_plain_member_variable_to_interface_block(StorageClass storage, const string &ib_var_ref,
                                                                SPIRType &ib_type, SPIRVariable &var, uint32_t mbr_idx,
-                                                               bool strip_array)
+                                                               InterfaceBlockMeta &meta)
 {
-	auto &var_type = strip_array ? get_variable_element_type(var) : get_variable_data_type(var);
+	auto &var_type = meta.strip_array ? get_variable_element_type(var) : get_variable_data_type(var);
 	auto &entry_func = get<SPIRFunction>(ir.default_entry_point);
 
 	BuiltIn builtin = BuiltInMax;
@@ -1944,13 +2027,13 @@ void CompilerMSL::add_plain_member_variable_to_interface_block(StorageClass stor
 	// Update the original variable reference to include the structure reference
 	string qual_var_name = ib_var_ref + "." + mbr_name;
 
-	if (is_builtin && !strip_array)
+	if (is_builtin && !meta.strip_array)
 	{
 		// For the builtin gl_PerVertex, we cannot treat it as a block anyways,
 		// so redirect to qualified name.
 		set_member_qualified_name(var_type.self, mbr_idx, qual_var_name);
 	}
-	else if (!strip_array)
+	else if (!meta.strip_array)
 	{
 		// Unflatten or flatten from [[stage_in]] or [[stage_out]] as appropriate.
 		switch (storage)
@@ -1989,7 +2072,7 @@ void CompilerMSL::add_plain_member_variable_to_interface_block(StorageClass stor
 	{
 		// The block itself might have a location and in this case, all members of the block
 		// receive incrementing locations.
-		uint32_t locn = get_accumulated_member_location(var, mbr_idx, strip_array);
+		uint32_t locn = get_accumulated_member_location(var, mbr_idx, meta.strip_array);
 		if (storage == StorageClassInput && (get_execution_model() == ExecutionModelVertex || is_tessellation_shader()))
 		{
 			mbr_type_id = ensure_correct_attribute_type(mbr_type_id, locn);
@@ -2155,19 +2238,19 @@ void CompilerMSL::add_tess_level_input_to_interface_block(const std::string &ib_
 }
 
 void CompilerMSL::add_variable_to_interface_block(StorageClass storage, const string &ib_var_ref, SPIRType &ib_type,
-                                                  SPIRVariable &var, bool strip_array)
+                                                  SPIRVariable &var, InterfaceBlockMeta &meta)
 {
 	auto &entry_func = get<SPIRFunction>(ir.default_entry_point);
 	// Tessellation control I/O variables and tessellation evaluation per-point inputs are
 	// usually declared as arrays. In these cases, we want to add the element type to the
 	// interface block, since in Metal it's the interface block itself which is arrayed.
-	auto &var_type = strip_array ? get_variable_element_type(var) : get_variable_data_type(var);
+	auto &var_type = meta.strip_array ? get_variable_element_type(var) : get_variable_data_type(var);
 	bool is_builtin = is_builtin_variable(var);
 	auto builtin = BuiltIn(get_decoration(var.self, DecorationBuiltIn));
 
 	if (var_type.basetype == SPIRType::Struct)
 	{
-		if (!is_builtin_type(var_type) && (!capture_output_to_buffer || storage == StorageClassInput) && !strip_array)
+		if (!is_builtin_type(var_type) && (!capture_output_to_buffer || storage == StorageClassInput) && !meta.strip_array)
 		{
 			// For I/O blocks or structs, we will need to pass the block itself around
 			// to functions if they are used globally in leaf functions.
@@ -2186,7 +2269,7 @@ void CompilerMSL::add_variable_to_interface_block(StorageClass storage, const st
 			// Luckily, for stage-out when capturing output, we can avoid this and just add
 			// composite members directly, because the stage-out structure is stored to a buffer,
 			// not returned.
-			add_plain_variable_to_interface_block(storage, ib_var_ref, ib_type, var, strip_array);
+			add_plain_variable_to_interface_block(storage, ib_var_ref, ib_type, var, meta);
 		}
 		else
 		{
@@ -2210,19 +2293,19 @@ void CompilerMSL::add_variable_to_interface_block(StorageClass storage, const st
 					if ((!is_builtin || attribute_load_store) && storage_is_stage_io && is_composite_type)
 					{
 						add_composite_member_variable_to_interface_block(storage, ib_var_ref, ib_type, var, mbr_idx,
-						                                                 strip_array);
+						                                                 meta);
 					}
 					else
 					{
 						add_plain_member_variable_to_interface_block(storage, ib_var_ref, ib_type, var, mbr_idx,
-						                                             strip_array);
+						                                             meta);
 					}
 				}
 			}
 		}
 	}
 	else if (get_execution_model() == ExecutionModelTessellationEvaluation && storage == StorageClassInput &&
-	         !strip_array && is_builtin && (builtin == BuiltInTessLevelOuter || builtin == BuiltInTessLevelInner))
+	         !meta.strip_array && is_builtin && (builtin == BuiltInTessLevelOuter || builtin == BuiltInTessLevelInner))
 	{
 		add_tess_level_input_to_interface_block(ib_var_ref, ib_type, var);
 	}
@@ -2243,11 +2326,11 @@ void CompilerMSL::add_variable_to_interface_block(StorageClass storage, const st
 		// MSL does not allow matrices or arrays in input or output variables, so need to handle it specially.
 		if ((!is_builtin || attribute_load_store) && storage_is_stage_io && is_composite_type)
 		{
-			add_composite_variable_to_interface_block(storage, ib_var_ref, ib_type, var, strip_array);
+			add_composite_variable_to_interface_block(storage, ib_var_ref, ib_type, var, meta);
 		}
 		else
 		{
-			add_plain_variable_to_interface_block(storage, ib_var_ref, ib_type, var, strip_array);
+			add_plain_variable_to_interface_block(storage, ib_var_ref, ib_type, var, meta);
 		}
 	}
 }
@@ -2301,6 +2384,18 @@ uint32_t CompilerMSL::add_interface_block(StorageClass storage, bool patch)
 	bool incl_builtins = storage == StorageClassOutput || is_tessellation_shader();
 	bool has_seen_barycentric = false;
 
+	InterfaceBlockMeta meta;
+
+	// Varying interfaces between stages which use "user()" attribute can be dealt with
+	// without explicit packing and unpacking of components. For any variables which link against the runtime
+	// in some way (vertex attributes, fragment output, etc), we'll need to deal with it somehow.
+	bool pack_components =
+	    (storage == StorageClassInput && get_execution_model() == ExecutionModelVertex) ||
+	    (storage == StorageClassOutput && get_execution_model() == ExecutionModelFragment) ||
+	    (storage == StorageClassInput && is_tessellation_shader()) ||
+	    (storage == StorageClassOutput && get_execution_model() == ExecutionModelTessellationControl) ||
+	    (storage == StorageClassOutput && get_execution_model() == ExecutionModelVertex && capture_output_to_buffer);
+
 	ir.for_each_typed_id<SPIRVariable>([&](uint32_t var_id, SPIRVariable &var) {
 		if (var.storage != storage)
 			return;
@@ -2347,6 +2442,21 @@ uint32_t CompilerMSL::add_interface_block(StorageClass storage, bool patch)
 		    (!is_builtin || is_interface_block_builtin))
 		{
 			vars.push_back(&var);
+
+			if (pack_components && !is_builtin)
+			{
+				// Need to deal specially with DecorationComponent.
+				// Multiple variables can alias the same Location, and try to make sure each location is declared only once.
+				// We will swizzle data in and out to make this work.
+				// We only need to consider plain variables here, not composites.
+				uint32_t component = get_decoration(var_id, DecorationComponent);
+				if (component != 0)
+				{
+					uint32_t location = get_decoration(var_id, DecorationLocation);
+					auto &location_meta = meta.location_meta[location];
+					location_meta.num_components = std::max(location_meta.num_components, component + type.vecsize);
+				}
+			}
 		}
 	});
 
@@ -2480,7 +2590,9 @@ uint32_t CompilerMSL::add_interface_block(StorageClass storage, bool patch)
 		    (get_execution_model() == ExecutionModelTessellationControl ||
 		     (get_execution_model() == ExecutionModelTessellationEvaluation && storage == StorageClassInput)) &&
 		    !patch;
-		add_variable_to_interface_block(storage, ib_var_ref, ib_type, *p_var, strip_array);
+
+		meta.strip_array = strip_array;
+		add_variable_to_interface_block(storage, ib_var_ref, ib_type, *p_var, meta);
 	}
 
 	// Sort the members of the structure by their locations.
@@ -2599,13 +2711,18 @@ uint32_t CompilerMSL::ensure_correct_builtin_type(uint32_t type_id, BuiltIn buil
 // Ensure that the type is compatible with the vertex attribute.
 // If it is, simply return the given type ID.
 // Otherwise, create a new type, and return its ID.
-uint32_t CompilerMSL::ensure_correct_attribute_type(uint32_t type_id, uint32_t location)
+uint32_t CompilerMSL::ensure_correct_attribute_type(uint32_t type_id, uint32_t location, uint32_t num_components)
 {
 	auto &type = get<SPIRType>(type_id);
 
 	auto p_va = vtx_attrs_by_location.find(location);
 	if (p_va == end(vtx_attrs_by_location))
-		return type_id;
+	{
+		if (num_components != 0 && type.vecsize != num_components)
+			return build_extended_vector_type(type_id, num_components);
+		else
+			return type_id;
+	}
 
 	switch (p_va->second.format)
 	{
@@ -2616,19 +2733,27 @@ uint32_t CompilerMSL::ensure_correct_attribute_type(uint32_t type_id, uint32_t l
 		case SPIRType::UByte:
 		case SPIRType::UShort:
 		case SPIRType::UInt:
-			return type_id;
+			if (num_components != 0 && type.vecsize != num_components)
+				return build_extended_vector_type(type_id, num_components);
+			else
+				return type_id;
+
 		case SPIRType::Short:
 		case SPIRType::Int:
 			break;
+
 		default:
 			SPIRV_CROSS_THROW("Vertex attribute type mismatch between host and shader");
 		}
+
 		uint32_t next_id = ir.increase_bound_by(type.pointer ? 2 : 1);
 		uint32_t base_type_id = next_id++;
 		auto &base_type = set<SPIRType>(base_type_id);
 		base_type = type;
 		base_type.basetype = type.basetype == SPIRType::Short ? SPIRType::UShort : SPIRType::UInt;
 		base_type.pointer = false;
+		if (num_components != 0)
+			base_type.vecsize = num_components;
 
 		if (!type.pointer)
 			return base_type_id;
@@ -2648,18 +2773,26 @@ uint32_t CompilerMSL::ensure_correct_attribute_type(uint32_t type_id, uint32_t l
 		{
 		case SPIRType::UShort:
 		case SPIRType::UInt:
-			return type_id;
+			if (num_components != 0 && type.vecsize != num_components)
+				return build_extended_vector_type(type_id, num_components);
+			else
+				return type_id;
+
 		case SPIRType::Int:
 			break;
+
 		default:
 			SPIRV_CROSS_THROW("Vertex attribute type mismatch between host and shader");
 		}
+
 		uint32_t next_id = ir.increase_bound_by(type.pointer ? 2 : 1);
 		uint32_t base_type_id = next_id++;
 		auto &base_type = set<SPIRType>(base_type_id);
 		base_type = type;
 		base_type.basetype = SPIRType::UInt;
 		base_type.pointer = false;
+		if (num_components != 0)
+			base_type.vecsize = num_components;
 
 		if (!type.pointer)
 			return base_type_id;
@@ -2674,7 +2807,8 @@ uint32_t CompilerMSL::ensure_correct_attribute_type(uint32_t type_id, uint32_t l
 	}
 
 	default:
+	case MSL_VERTEX_FORMAT_OTHER:
+		if (num_components != 0 && type.vecsize != num_components)
+			type_id = build_extended_vector_type(type_id, num_components);
 		break;
 	}
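
Note (not part of the patch text above): the InterfaceBlockMeta type that these hunks thread through the add_*_to_interface_block() helpers is declared in spirv_msl.hpp, which is outside this excerpt. Judging purely from the accesses made here (meta.strip_array, meta.location_meta keyed by Location, and the per-location num_components / ib_index fields, where ib_index == ~0u means "no interface-block member has been emitted for this location yet"), its shape is roughly the sketch below. The field names are taken from the usage above; everything else is an assumption.

#include <cstdint>
#include <unordered_map>

// Approximate reconstruction of the per-interface-block metadata used by the hunks above.
struct InterfaceBlockMeta
{
	// Tracks a single Location shared by several component-decorated variables.
	struct LocationMeta
	{
		uint32_t num_components = 0; // widest (component + vecsize) seen at this location
		uint32_t ib_index = ~0u;     // interface-block member index; ~0u until first declared
	};
	std::unordered_map<uint32_t, LocationMeta> location_meta; // keyed by DecorationLocation
	bool strip_array = false;                                 // replaces the old bool strip_array parameter
};

add_interface_block() fills location_meta up front (the @@ -2347 hunk) before looping over the variables, so that the first variable hitting a shared location declares the widened member and later ones only emit swizzled fixups.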
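For a sense of what the component packing is meant to produce, here is a rough hand-written sketch of the MSL one would expect for a vertex shader whose SPIR-V has two vec2 inputs aliasing location 0 at components 0 and 2. It is illustrative only: the names (main0_in, m_loc0, a, b) are invented, and real member names come from ensure_valid_name()/to_member_name(); the point is that both inputs share one widened float4 attribute member and are unpacked by the generated fixup statements.

#include <metal_stdlib>
using namespace metal;

struct main0_in
{
	// Widened to num_components == 4 by ensure_correct_attribute_type().
	float4 m_loc0 [[attribute(0)]];
};

struct main0_out
{
	float4 gl_Position [[position]];
};

vertex main0_out main0(main0_in in [[stage_in]])
{
	main0_out out = {};
	float2 a = in.m_loc0.xy; // fixup via vector_swizzle(2, 0)
	float2 b = in.m_loc0.zw; // fixup via vector_swizzle(2, 2)
	out.gl_Position = float4(a, b);
	return out;
}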