/*
 * Copyright 2016-2019 The Brenwill Workshop Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "spirv_msl.hpp"
#include "GLSL.std.450.h"

#include <algorithm>
#include <assert.h>
#include <numeric>

using namespace spv;
using namespace SPIRV_CROSS_NAMESPACE;
using namespace std;

static const uint32_t k_unknown_location = ~0u;
static const uint32_t k_unknown_component = ~0u;

CompilerMSL::CompilerMSL(std::vector<uint32_t> spirv_)
    : CompilerGLSL(move(spirv_))
{
}

CompilerMSL::CompilerMSL(const uint32_t *ir_, size_t word_count)
    : CompilerGLSL(ir_, word_count)
{
}

CompilerMSL::CompilerMSL(const ParsedIR &ir_)
    : CompilerGLSL(ir_)
{
}

CompilerMSL::CompilerMSL(ParsedIR &&ir_)
    : CompilerGLSL(std::move(ir_))
{
}
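
// Records a vertex attribute description for the given location. If the attribute
// also maps to a builtin, index it by that builtin as well.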
void CompilerMSL::add_msl_vertex_attribute(const MSLVertexAttr &va)
{
	vtx_attrs_by_location[va.location] = va;
	if (va.builtin != BuiltInMax && !vtx_attrs_by_builtin.count(va.builtin))
		vtx_attrs_by_builtin[va.builtin] = va;
}
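
// Records an explicit MSL binding for the resource identified by stage, descriptor set
// and binding number. The second member of the stored pair tracks whether the binding
// has actually been used by the shader.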
void CompilerMSL::add_msl_resource_binding(const MSLResourceBinding &binding)
{
	StageSetBinding tuple = { binding.stage, binding.desc_set, binding.binding };
	resource_bindings[tuple] = { binding, false };
}
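
// Excludes the given descriptor set from argument buffer generation; its resources
// will be bound as discrete resources instead.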
void CompilerMSL::add_discrete_descriptor_set(uint32_t desc_set)
{
	if (desc_set < kMaxArgumentBuffers)
		argument_buffer_discrete_mask |= 1u << desc_set;
}

bool CompilerMSL::is_msl_vertex_attribute_used(uint32_t location)
{
	return vtx_attrs_in_use.count(location) != 0;
}

bool CompilerMSL::is_msl_resource_binding_used(ExecutionModel model, uint32_t desc_set, uint32_t binding)
{
	StageSetBinding tuple = { model, desc_set, binding };
	auto itr = resource_bindings.find(tuple);
	return itr != end(resource_bindings) && itr->second.second;
}

void CompilerMSL::set_fragment_output_components(uint32_t location, uint32_t components)
{
	fragment_output_components[location] = components;
}
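
// Creates any builtin input variables the translation needs but the shader did not
// declare: gl_FragCoord for subpass input reads, gl_SampleID for sample positions,
// vertex/instance indices when vertex output is captured to a buffer, invocation and
// primitive IDs for tessellation control, and subgroup builtins for subgroup masks.
// Also declares the implicit swizzle-constant and buffer-size constant buffers.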
void CompilerMSL::build_implicit_builtins()
{
	bool need_sample_pos = active_input_builtins.get(BuiltInSamplePosition);
	bool need_vertex_params = capture_output_to_buffer && get_execution_model() == ExecutionModelVertex;
	bool need_tesc_params = get_execution_model() == ExecutionModelTessellationControl;
	bool need_subgroup_mask =
	    active_input_builtins.get(BuiltInSubgroupEqMask) || active_input_builtins.get(BuiltInSubgroupGeMask) ||
	    active_input_builtins.get(BuiltInSubgroupGtMask) || active_input_builtins.get(BuiltInSubgroupLeMask) ||
	    active_input_builtins.get(BuiltInSubgroupLtMask);
	bool need_subgroup_ge_mask = !msl_options.is_ios() && (active_input_builtins.get(BuiltInSubgroupGeMask) ||
	                                                       active_input_builtins.get(BuiltInSubgroupGtMask));
	if (need_subpass_input || need_sample_pos || need_subgroup_mask || need_vertex_params || need_tesc_params ||
	    needs_subgroup_invocation_id)
	{
		bool has_frag_coord = false;
		bool has_sample_id = false;
		bool has_vertex_idx = false;
		bool has_base_vertex = false;
		bool has_instance_idx = false;
		bool has_base_instance = false;
		bool has_invocation_id = false;
		bool has_primitive_id = false;
		bool has_subgroup_invocation_id = false;
		bool has_subgroup_size = false;
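
		// Scan the shader's declared input builtins to see which of the needed ones are already present.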
		ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
			if (var.storage != StorageClassInput || !ir.meta[var.self].decoration.builtin)
				return;

			BuiltIn builtin = ir.meta[var.self].decoration.builtin_type;
			if (need_subpass_input && builtin == BuiltInFragCoord)
			{
				builtin_frag_coord_id = var.self;
				has_frag_coord = true;
			}

			if (need_sample_pos && builtin == BuiltInSampleId)
			{
				builtin_sample_id_id = var.self;
				has_sample_id = true;
			}

			if (need_vertex_params)
			{
				switch (builtin)
				{
				case BuiltInVertexIndex:
					builtin_vertex_idx_id = var.self;
					has_vertex_idx = true;
					break;
				case BuiltInBaseVertex:
					builtin_base_vertex_id = var.self;
					has_base_vertex = true;
					break;
				case BuiltInInstanceIndex:
					builtin_instance_idx_id = var.self;
					has_instance_idx = true;
					break;
				case BuiltInBaseInstance:
					builtin_base_instance_id = var.self;
					has_base_instance = true;
					break;
				default:
					break;
				}
			}

			if (need_tesc_params)
			{
				switch (builtin)
				{
				case BuiltInInvocationId:
					builtin_invocation_id_id = var.self;
					has_invocation_id = true;
					break;
				case BuiltInPrimitiveId:
					builtin_primitive_id_id = var.self;
					has_primitive_id = true;
					break;
				default:
					break;
				}
			}

			if ((need_subgroup_mask || needs_subgroup_invocation_id) && builtin == BuiltInSubgroupLocalInvocationId)
			{
				builtin_subgroup_invocation_id_id = var.self;
				has_subgroup_invocation_id = true;
			}

			if (need_subgroup_ge_mask && builtin == BuiltInSubgroupSize)
			{
				builtin_subgroup_size_id = var.self;
				has_subgroup_size = true;
			}
		});
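
		// Synthesize any needed builtins that the shader did not declare itself.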
		if (!has_frag_coord && need_subpass_input)
		{
			uint32_t offset = ir.increase_bound_by(3);
			uint32_t type_id = offset;
			uint32_t type_ptr_id = offset + 1;
			uint32_t var_id = offset + 2;

			// Create gl_FragCoord.
			SPIRType vec4_type;
			vec4_type.basetype = SPIRType::Float;
			vec4_type.width = 32;
			vec4_type.vecsize = 4;
			set<SPIRType>(type_id, vec4_type);

			SPIRType vec4_type_ptr;
			vec4_type_ptr = vec4_type;
			vec4_type_ptr.pointer = true;
			vec4_type_ptr.parent_type = type_id;
			vec4_type_ptr.storage = StorageClassInput;
			auto &ptr_type = set<SPIRType>(type_ptr_id, vec4_type_ptr);
			ptr_type.self = type_id;

			set<SPIRVariable>(var_id, type_ptr_id, StorageClassInput);
			set_decoration(var_id, DecorationBuiltIn, BuiltInFragCoord);
			builtin_frag_coord_id = var_id;
			mark_implicit_builtin(StorageClassInput, BuiltInFragCoord, var_id);
		}

		if (!has_sample_id && need_sample_pos)
		{
			uint32_t offset = ir.increase_bound_by(3);
			uint32_t type_id = offset;
			uint32_t type_ptr_id = offset + 1;
			uint32_t var_id = offset + 2;

			// Create gl_SampleID.
			SPIRType uint_type;
			uint_type.basetype = SPIRType::UInt;
			uint_type.width = 32;
			set<SPIRType>(type_id, uint_type);

			SPIRType uint_type_ptr;
			uint_type_ptr = uint_type;
			uint_type_ptr.pointer = true;
			uint_type_ptr.parent_type = type_id;
			uint_type_ptr.storage = StorageClassInput;
			auto &ptr_type = set<SPIRType>(type_ptr_id, uint_type_ptr);
			ptr_type.self = type_id;

			set<SPIRVariable>(var_id, type_ptr_id, StorageClassInput);
			set_decoration(var_id, DecorationBuiltIn, BuiltInSampleId);
			builtin_sample_id_id = var_id;
			mark_implicit_builtin(StorageClassInput, BuiltInSampleId, var_id);
		}

		if (need_vertex_params && (!has_vertex_idx || !has_base_vertex || !has_instance_idx || !has_base_instance))
		{
			uint32_t offset = ir.increase_bound_by(2);
			uint32_t type_id = offset;
			uint32_t type_ptr_id = offset + 1;

			SPIRType uint_type;
			uint_type.basetype = SPIRType::UInt;
			uint_type.width = 32;
			set<SPIRType>(type_id, uint_type);

			SPIRType uint_type_ptr;
			uint_type_ptr = uint_type;
			uint_type_ptr.pointer = true;
			uint_type_ptr.parent_type = type_id;
			uint_type_ptr.storage = StorageClassInput;
			auto &ptr_type = set<SPIRType>(type_ptr_id, uint_type_ptr);
			ptr_type.self = type_id;

			if (!has_vertex_idx)
			{
				uint32_t var_id = ir.increase_bound_by(1);

				// Create gl_VertexIndex.
				set<SPIRVariable>(var_id, type_ptr_id, StorageClassInput);
				set_decoration(var_id, DecorationBuiltIn, BuiltInVertexIndex);
				builtin_vertex_idx_id = var_id;
				mark_implicit_builtin(StorageClassInput, BuiltInVertexIndex, var_id);
			}

			if (!has_base_vertex)
			{
				uint32_t var_id = ir.increase_bound_by(1);

				// Create gl_BaseVertex.
				set<SPIRVariable>(var_id, type_ptr_id, StorageClassInput);
				set_decoration(var_id, DecorationBuiltIn, BuiltInBaseVertex);
				builtin_base_vertex_id = var_id;
				mark_implicit_builtin(StorageClassInput, BuiltInBaseVertex, var_id);
			}

			if (!has_instance_idx)
			{
				uint32_t var_id = ir.increase_bound_by(1);

				// Create gl_InstanceIndex.
				set<SPIRVariable>(var_id, type_ptr_id, StorageClassInput);
				set_decoration(var_id, DecorationBuiltIn, BuiltInInstanceIndex);
				builtin_instance_idx_id = var_id;
				mark_implicit_builtin(StorageClassInput, BuiltInInstanceIndex, var_id);
			}

			if (!has_base_instance)
			{
				uint32_t var_id = ir.increase_bound_by(1);

				// Create gl_BaseInstance.
				set<SPIRVariable>(var_id, type_ptr_id, StorageClassInput);
				set_decoration(var_id, DecorationBuiltIn, BuiltInBaseInstance);
				builtin_base_instance_id = var_id;
				mark_implicit_builtin(StorageClassInput, BuiltInBaseInstance, var_id);
			}
		}

		if (need_tesc_params && (!has_invocation_id || !has_primitive_id))
		{
			uint32_t offset = ir.increase_bound_by(2);
			uint32_t type_id = offset;
			uint32_t type_ptr_id = offset + 1;

			SPIRType uint_type;
			uint_type.basetype = SPIRType::UInt;
			uint_type.width = 32;
			set<SPIRType>(type_id, uint_type);

			SPIRType uint_type_ptr;
			uint_type_ptr = uint_type;
			uint_type_ptr.pointer = true;
			uint_type_ptr.parent_type = type_id;
			uint_type_ptr.storage = StorageClassInput;
			auto &ptr_type = set<SPIRType>(type_ptr_id, uint_type_ptr);
			ptr_type.self = type_id;

			if (!has_invocation_id)
			{
				uint32_t var_id = ir.increase_bound_by(1);

				// Create gl_InvocationID.
				set<SPIRVariable>(var_id, type_ptr_id, StorageClassInput);
				set_decoration(var_id, DecorationBuiltIn, BuiltInInvocationId);
				builtin_invocation_id_id = var_id;
				mark_implicit_builtin(StorageClassInput, BuiltInInvocationId, var_id);
			}

			if (!has_primitive_id)
			{
				uint32_t var_id = ir.increase_bound_by(1);

				// Create gl_PrimitiveID.
				set<SPIRVariable>(var_id, type_ptr_id, StorageClassInput);
				set_decoration(var_id, DecorationBuiltIn, BuiltInPrimitiveId);
				builtin_primitive_id_id = var_id;
				mark_implicit_builtin(StorageClassInput, BuiltInPrimitiveId, var_id);
			}
		}

		if (!has_subgroup_invocation_id && (need_subgroup_mask || needs_subgroup_invocation_id))
		{
			uint32_t offset = ir.increase_bound_by(3);
			uint32_t type_id = offset;
			uint32_t type_ptr_id = offset + 1;
			uint32_t var_id = offset + 2;

			// Create gl_SubgroupInvocationID.
			SPIRType uint_type;
			uint_type.basetype = SPIRType::UInt;
			uint_type.width = 32;
			set<SPIRType>(type_id, uint_type);

			SPIRType uint_type_ptr;
			uint_type_ptr = uint_type;
			uint_type_ptr.pointer = true;
			uint_type_ptr.parent_type = type_id;
			uint_type_ptr.storage = StorageClassInput;
			auto &ptr_type = set<SPIRType>(type_ptr_id, uint_type_ptr);
			ptr_type.self = type_id;

			set<SPIRVariable>(var_id, type_ptr_id, StorageClassInput);
			set_decoration(var_id, DecorationBuiltIn, BuiltInSubgroupLocalInvocationId);
			builtin_subgroup_invocation_id_id = var_id;
			mark_implicit_builtin(StorageClassInput, BuiltInSubgroupLocalInvocationId, var_id);
		}

		if (!has_subgroup_size && need_subgroup_ge_mask)
		{
			uint32_t offset = ir.increase_bound_by(3);
			uint32_t type_id = offset;
			uint32_t type_ptr_id = offset + 1;
			uint32_t var_id = offset + 2;

			// Create gl_SubgroupSize.
			SPIRType uint_type;
			uint_type.basetype = SPIRType::UInt;
			uint_type.width = 32;
			set<SPIRType>(type_id, uint_type);

			SPIRType uint_type_ptr;
			uint_type_ptr = uint_type;
			uint_type_ptr.pointer = true;
			uint_type_ptr.parent_type = type_id;
			uint_type_ptr.storage = StorageClassInput;
			auto &ptr_type = set<SPIRType>(type_ptr_id, uint_type_ptr);
			ptr_type.self = type_id;

			set<SPIRVariable>(var_id, type_ptr_id, StorageClassInput);
			set_decoration(var_id, DecorationBuiltIn, BuiltInSubgroupSize);
			builtin_subgroup_size_id = var_id;
			mark_implicit_builtin(StorageClassInput, BuiltInSubgroupSize, var_id);
		}
	}
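
	// Texture swizzling and buffer-size lookups read from implicit constant buffers;
	// declare those buffer variables here when they are needed.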
	if (needs_swizzle_buffer_def)
	{
		uint32_t var_id = build_constant_uint_array_pointer();
		set_name(var_id, "spvSwizzleConstants");
		// This should never match anything.
		set_decoration(var_id, DecorationDescriptorSet, kSwizzleBufferBinding);
		set_decoration(var_id, DecorationBinding, msl_options.swizzle_buffer_index);
		swizzle_buffer_id = var_id;
	}

	if (!buffers_requiring_array_length.empty())
	{
		uint32_t var_id = build_constant_uint_array_pointer();
		set_name(var_id, "spvBufferSizeConstants");
		// This should never match anything.
		set_decoration(var_id, DecorationDescriptorSet, kBufferSizeBufferBinding);
		set_decoration(var_id, DecorationBinding, msl_options.buffer_size_buffer_index);
		buffer_size_buffer_id = var_id;
	}
}
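
// Marks an implicitly-created builtin as active and adds it to the entry point's
// interface so it is emitted in the shader signature.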
void CompilerMSL::mark_implicit_builtin(StorageClass storage, BuiltIn builtin, uint32_t id)
{
	Bitset *active_builtins = nullptr;
	switch (storage)
	{
	case StorageClassInput:
		active_builtins = &active_input_builtins;
		break;

	case StorageClassOutput:
		active_builtins = &active_output_builtins;
		break;

	default:
		break;
	}

	assert(active_builtins != nullptr);
	active_builtins->set(builtin);
	get_entry_point().interface_variables.push_back(id);
}
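
// Builds a variable of pointer-to-uint type for passing implicit data such as the
// swizzle or buffer-size constants, and returns the new variable's ID.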
uint32_t CompilerMSL::build_constant_uint_array_pointer()
{
	uint32_t offset = ir.increase_bound_by(4);
	uint32_t type_id = offset;
	uint32_t type_ptr_id = offset + 1;
	uint32_t type_ptr_ptr_id = offset + 2;
	uint32_t var_id = offset + 3;

	// Create a buffer to hold extra data, including the swizzle constants.
	SPIRType uint_type;
	uint_type.basetype = SPIRType::UInt;
	uint_type.width = 32;
	set<SPIRType>(type_id, uint_type);

	SPIRType uint_type_pointer = uint_type;
	uint_type_pointer.pointer = true;
	uint_type_pointer.pointer_depth = 1;
	uint_type_pointer.parent_type = type_id;
	uint_type_pointer.storage = StorageClassUniform;
	set<SPIRType>(type_ptr_id, uint_type_pointer);
	set_decoration(type_ptr_id, DecorationArrayStride, 4);

	SPIRType uint_type_pointer2 = uint_type_pointer;
	uint_type_pointer2.pointer_depth++;
	uint_type_pointer2.parent_type = type_ptr_id;
	set<SPIRType>(type_ptr_ptr_id, uint_type_pointer2);

	set<SPIRVariable>(var_id, type_ptr_ptr_id, StorageClassUniformConstant);
	return var_id;
}
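
// Returns an MSL sampler descriptor argument such as "address::repeat", with an
// optional per-coordinate prefix ("s_", "t_" or "r_").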
static string create_sampler_address(const char *prefix, MSLSamplerAddress addr)
{
	switch (addr)
	{
	case MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE:
		return join(prefix, "address::clamp_to_edge");
	case MSL_SAMPLER_ADDRESS_CLAMP_TO_ZERO:
		return join(prefix, "address::clamp_to_zero");
	case MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER:
		return join(prefix, "address::clamp_to_border");
	case MSL_SAMPLER_ADDRESS_REPEAT:
		return join(prefix, "address::repeat");
	case MSL_SAMPLER_ADDRESS_MIRRORED_REPEAT:
		return join(prefix, "address::mirrored_repeat");
	default:
		SPIRV_CROSS_THROW("Invalid sampler addressing mode.");
	}
}

SPIRType &CompilerMSL::get_stage_in_struct_type()
{
	auto &si_var = get<SPIRVariable>(stage_in_var_id);
	return get_variable_data_type(si_var);
}

SPIRType &CompilerMSL::get_stage_out_struct_type()
{
	auto &so_var = get<SPIRVariable>(stage_out_var_id);
	return get_variable_data_type(so_var);
}

SPIRType &CompilerMSL::get_patch_stage_in_struct_type()
{
	auto &si_var = get<SPIRVariable>(patch_stage_in_var_id);
	return get_variable_data_type(si_var);
}

SPIRType &CompilerMSL::get_patch_stage_out_struct_type()
{
	auto &so_var = get<SPIRVariable>(patch_stage_out_var_id);
	return get_variable_data_type(so_var);
}
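
// Returns the name of the Metal tessellation factor struct matching the shader's
// primitive mode: triangles or quads.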
std::string CompilerMSL::get_tess_factor_struct_name()
{
	if (get_entry_point().flags.get(ExecutionModeTriangles))
		return "MTLTriangleTessellationFactorsHalf";
	return "MTLQuadTessellationFactorsHalf";
}
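
// Emits declarations that must live at the top of the entry function: constexpr
// samplers built from the MSLConstexprSampler descriptions, and local arrays that
// gather the elements of buffer array arguments.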
void CompilerMSL::emit_entry_point_declarations()
{
	// FIXME: Get test coverage here ...

	// Emit constexpr samplers here.
	for (auto &samp : constexpr_samplers_by_id)
	{
		auto &var = get<SPIRVariable>(samp.first);
		auto &type = get<SPIRType>(var.basetype);
		if (type.basetype == SPIRType::Sampler)
			add_resource_name(samp.first);

		SmallVector<string> args;
		auto &s = samp.second;

		if (s.coord != MSL_SAMPLER_COORD_NORMALIZED)
			args.push_back("coord::pixel");

		if (s.min_filter == s.mag_filter)
		{
			if (s.min_filter != MSL_SAMPLER_FILTER_NEAREST)
				args.push_back("filter::linear");
		}
		else
		{
			if (s.min_filter != MSL_SAMPLER_FILTER_NEAREST)
				args.push_back("min_filter::linear");
			if (s.mag_filter != MSL_SAMPLER_FILTER_NEAREST)
				args.push_back("mag_filter::linear");
		}

		switch (s.mip_filter)
		{
		case MSL_SAMPLER_MIP_FILTER_NONE:
			// Default
			break;
		case MSL_SAMPLER_MIP_FILTER_NEAREST:
			args.push_back("mip_filter::nearest");
			break;
		case MSL_SAMPLER_MIP_FILTER_LINEAR:
			args.push_back("mip_filter::linear");
			break;
		default:
			SPIRV_CROSS_THROW("Invalid mip filter.");
		}

		if (s.s_address == s.t_address && s.s_address == s.r_address)
		{
			if (s.s_address != MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE)
				args.push_back(create_sampler_address("", s.s_address));
		}
		else
		{
			if (s.s_address != MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE)
				args.push_back(create_sampler_address("s_", s.s_address));
			if (s.t_address != MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE)
				args.push_back(create_sampler_address("t_", s.t_address));
			if (s.r_address != MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE)
				args.push_back(create_sampler_address("r_", s.r_address));
		}

		if (s.compare_enable)
		{
			switch (s.compare_func)
			{
			case MSL_SAMPLER_COMPARE_FUNC_ALWAYS:
				args.push_back("compare_func::always");
				break;
			case MSL_SAMPLER_COMPARE_FUNC_NEVER:
				args.push_back("compare_func::never");
				break;
			case MSL_SAMPLER_COMPARE_FUNC_EQUAL:
				args.push_back("compare_func::equal");
				break;
			case MSL_SAMPLER_COMPARE_FUNC_NOT_EQUAL:
				args.push_back("compare_func::not_equal");
				break;
			case MSL_SAMPLER_COMPARE_FUNC_LESS:
				args.push_back("compare_func::less");
				break;
			case MSL_SAMPLER_COMPARE_FUNC_LESS_EQUAL:
				args.push_back("compare_func::less_equal");
				break;
			case MSL_SAMPLER_COMPARE_FUNC_GREATER:
				args.push_back("compare_func::greater");
				break;
			case MSL_SAMPLER_COMPARE_FUNC_GREATER_EQUAL:
				args.push_back("compare_func::greater_equal");
				break;
			default:
				SPIRV_CROSS_THROW("Invalid sampler compare function.");
			}
		}

		if (s.s_address == MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER || s.t_address == MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER ||
		    s.r_address == MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER)
		{
			switch (s.border_color)
			{
			case MSL_SAMPLER_BORDER_COLOR_OPAQUE_BLACK:
				args.push_back("border_color::opaque_black");
				break;
			case MSL_SAMPLER_BORDER_COLOR_OPAQUE_WHITE:
				args.push_back("border_color::opaque_white");
				break;
			case MSL_SAMPLER_BORDER_COLOR_TRANSPARENT_BLACK:
				args.push_back("border_color::transparent_black");
				break;
			default:
				SPIRV_CROSS_THROW("Invalid sampler border color.");
			}
		}

		if (s.anisotropy_enable)
			args.push_back(join("max_anisotropy(", s.max_anisotropy, ")"));
		if (s.lod_clamp_enable)
		{
			args.push_back(join("lod_clamp(", convert_to_string(s.lod_clamp_min, current_locale_radix_character), ", ",
			                    convert_to_string(s.lod_clamp_max, current_locale_radix_character), ")"));
		}

		statement("constexpr sampler ",
		          type.basetype == SPIRType::SampledImage ? to_sampler_expression(samp.first) : to_name(samp.first),
		          "(", merge(args), ");");
	}

	// Emit buffer arrays here.
	for (uint32_t array_id : buffer_arrays)
	{
		const auto &var = get<SPIRVariable>(array_id);
		const auto &type = get_variable_data_type(var);
		string name = to_name(array_id);
		statement(get_argument_address_space(var) + " " + type_to_glsl(type) + "* " + name + "[] =");
		begin_scope();
		for (uint32_t i = 0; i < type.array[0]; ++i)
			statement(name + "_" + convert_to_string(i) + ",");
		end_scope_decl();
		statement_no_indent("");
	}
	// For some reason, without this, we end up emitting the arrays twice.
	buffer_arrays.clear();
}
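
// Compiles the SPIR-V IR to MSL source: configures the GLSL backend for Metal,
// analyzes usage, builds implicit builtins and interface blocks, then emits the
// shader in passes until no recompilation is forced.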
string CompilerMSL::compile()
{
	// Do not deal with GLES-isms like precision, older extensions and such.
	options.vulkan_semantics = true;
	options.es = false;
	options.version = 450;
	backend.null_pointer_literal = "nullptr";
	backend.float_literal_suffix = false;
	backend.uint32_t_literal_suffix = true;
	backend.int16_t_literal_suffix = "";
	backend.uint16_t_literal_suffix = "u";
	backend.basic_int_type = "int";
	backend.basic_uint_type = "uint";
	backend.basic_int8_type = "char";
	backend.basic_uint8_type = "uchar";
	backend.basic_int16_type = "short";
	backend.basic_uint16_type = "ushort";
	backend.discard_literal = "discard_fragment()";
	backend.swizzle_is_function = false;
	backend.shared_is_implied = false;
	backend.use_initializer_list = true;
	backend.use_typed_initializer_list = true;
	backend.native_row_major_matrix = false;
	backend.unsized_array_supported = false;
	backend.can_declare_arrays_inline = false;
	backend.can_return_array = false;
	backend.boolean_mix_support = false;
	backend.allow_truncated_access_chain = true;
	backend.array_is_value_type = false;
	backend.comparison_image_samples_scalar = true;
	backend.native_pointers = true;
	backend.nonuniform_qualifier = "";

	capture_output_to_buffer = msl_options.capture_output_to_buffer;
	is_rasterization_disabled = msl_options.disable_rasterization || capture_output_to_buffer;

	fixup_type_alias();
	replace_illegal_names();

	struct_member_padding.clear();

	build_function_control_flow_graphs_and_analyze();
	update_active_builtins();
	analyze_image_and_sampler_usage();
	analyze_sampled_image_usage();
	preprocess_op_codes();
	build_implicit_builtins();

	fixup_image_load_store_access();

	set_enabled_interface_variables(get_active_interface_variables());
	if (swizzle_buffer_id)
		active_interface_variables.insert(swizzle_buffer_id);
	if (buffer_size_buffer_id)
		active_interface_variables.insert(buffer_size_buffer_id);

	// Create structs to hold input, output and uniform variables.
	// Do output first to ensure out. is declared at top of entry function.
	qual_pos_var_name = "";
	stage_out_var_id = add_interface_block(StorageClassOutput);
	patch_stage_out_var_id = add_interface_block(StorageClassOutput, true);
	stage_in_var_id = add_interface_block(StorageClassInput);
	if (get_execution_model() == ExecutionModelTessellationEvaluation)
		patch_stage_in_var_id = add_interface_block(StorageClassInput, true);

	if (get_execution_model() == ExecutionModelTessellationControl)
		stage_out_ptr_var_id = add_interface_block_pointer(stage_out_var_id, StorageClassOutput);
	if (is_tessellation_shader())
		stage_in_ptr_var_id = add_interface_block_pointer(stage_in_var_id, StorageClassInput);

	// Metal vertex functions that define no output must disable rasterization and return void.
	if (!stage_out_var_id)
		is_rasterization_disabled = true;

	// Convert the use of global variables to recursively-passed function parameters
	localize_global_variables();
	extract_global_variables_from_functions();

	// Mark any non-stage-in structs to be tightly packed.
	mark_packable_structs();
	reorder_type_alias();

	// Add fixup hooks required by shader inputs and outputs. This needs to happen before
	// the loop, so the hooks aren't added multiple times.
	fix_up_shader_inputs_outputs();

	// If we are using argument buffers, we create argument buffer structures for them here.
	// These buffers will be used in the entry point, not the individual resources.
	if (msl_options.argument_buffers)
	{
		if (!msl_options.supports_msl_version(2, 0))
			SPIRV_CROSS_THROW("Argument buffers can only be used with MSL 2.0 and up.");
		analyze_argument_buffers();
	}
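
	// Emitting can force a recompilation (e.g. when a construct requiring an extra
	// pass is encountered), so run the emit loop until the output stabilizes.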
	uint32_t pass_count = 0;
	do
	{
		if (pass_count >= 3)
			SPIRV_CROSS_THROW("Over 3 compilation loops detected. Must be a bug!");

		reset();

		// Start bindings at zero.
		next_metal_resource_index_buffer = 0;
		next_metal_resource_index_texture = 0;
		next_metal_resource_index_sampler = 0;

		// Move constructor for this type is broken on GCC 4.9 ...
		buffer.reset();

		emit_header();
		emit_specialization_constants_and_structs();
		emit_resources();
		emit_custom_functions();
		emit_function(get<SPIRFunction>(ir.default_entry_point), Bitset());

		pass_count++;
	} while (is_forcing_recompilation());

	return buffer.str();
}

// Register the need to output any custom functions.
void CompilerMSL::preprocess_op_codes()
{
	OpCodePreprocessor preproc(*this);
	traverse_all_reachable_opcodes(get<SPIRFunction>(ir.default_entry_point), preproc);

	suppress_missing_prototypes = preproc.suppress_missing_prototypes;

	if (preproc.uses_atomics)
	{
		add_header_line("#include <metal_atomic>");
		add_pragma_line("#pragma clang diagnostic ignored \"-Wunused-variable\"");
	}

	// Metal vertex functions that write to resources must disable rasterization and return void.
	if (preproc.uses_resource_write)
		is_rasterization_disabled = true;

	// Tessellation control shaders are run as compute functions in Metal, and so
	// must capture their output to a buffer.
	if (get_execution_model() == ExecutionModelTessellationControl)
	{
		is_rasterization_disabled = true;
		capture_output_to_buffer = true;
	}

	if (preproc.needs_subgroup_invocation_id)
		needs_subgroup_invocation_id = true;
}
|
|
|
|
|
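
// Sketch of the atomics path above: a shader performing, say, OpAtomicIAdd
// ends up with both registered lines in the final MSL preamble (their exact
// placement within the preamble is decided later by emit_header()):
//
//     #pragma clang diagnostic ignored "-Wunused-variable"
//     #include <metal_atomic>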

// Move the Private and Workgroup global variables to the entry function.
// Non-constant variables cannot have global scope in Metal.
void CompilerMSL::localize_global_variables()
{
	auto &entry_func = get<SPIRFunction>(ir.default_entry_point);
	auto iter = global_variables.begin();
	while (iter != global_variables.end())
	{
		uint32_t v_id = *iter;
		auto &var = get<SPIRVariable>(v_id);
		if (var.storage == StorageClassPrivate || var.storage == StorageClassWorkgroup)
		{
			if (!variable_is_lut(var))
				entry_func.add_local_variable(v_id);
			iter = global_variables.erase(iter);
		}
		else
			iter++;
	}
}
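
// Sketch of why this is needed (illustrative MSL, not emitted verbatim):
//
//     float4 gAccum;    // invalid in MSL: non-constant at program scope
//
// After this pass the declaration lives inside the entry function instead,
// and callees receive it explicitly via extract_global_variables_from_functions().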

// For any global variable accessed directly by a function,
// extract that variable and add it as an argument to that function.
void CompilerMSL::extract_global_variables_from_functions()
{
	// Uniforms
	unordered_set<uint32_t> global_var_ids;
	ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
		if (var.storage == StorageClassInput || var.storage == StorageClassOutput ||
		    var.storage == StorageClassUniform || var.storage == StorageClassUniformConstant ||
		    var.storage == StorageClassPushConstant || var.storage == StorageClassStorageBuffer)
		{
			global_var_ids.insert(var.self);
		}
	});

	// Local vars that are declared in the main function and accessed directly by a function
	auto &entry_func = get<SPIRFunction>(ir.default_entry_point);
	for (auto &var : entry_func.local_variables)
		if (get<SPIRVariable>(var).storage != StorageClassFunction)
			global_var_ids.insert(var);

	std::set<uint32_t> added_arg_ids;
	unordered_set<uint32_t> processed_func_ids;
	extract_global_variables_from_function(ir.default_entry_point, added_arg_ids, global_var_ids, processed_func_ids);
}
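
// Illustrative sketch of the end-to-end rewrite (names hypothetical): a helper
// that reads a global uniform buffer,
//
//     float fetch_scale() { return ubo.scale; }
//
// is rewritten so the buffer is threaded through as an explicit argument,
//
//     float fetch_scale(constant UBO& ubo) { return ubo.scale; }
//
// with the entry point passing its own `ubo` parameter along at each call site.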

// MSL does not support the use of global variables for shader input content.
// For any global variable accessed directly by the specified function, extract that variable,
// add it as an argument to that function, and the arg to the added_arg_ids collection.
void CompilerMSL::extract_global_variables_from_function(uint32_t func_id, std::set<uint32_t> &added_arg_ids,
                                                         unordered_set<uint32_t> &global_var_ids,
                                                         unordered_set<uint32_t> &processed_func_ids)
{
	// Avoid processing a function more than once
	if (processed_func_ids.find(func_id) != processed_func_ids.end())
	{
		// Return function global variables
		added_arg_ids = function_global_vars[func_id];
		return;
	}

	processed_func_ids.insert(func_id);

	auto &func = get<SPIRFunction>(func_id);

	// Recursively establish global args added to functions on which we depend.
	for (auto block : func.blocks)
	{
		auto &b = get<SPIRBlock>(block);
		for (auto &i : b.ops)
		{
			auto ops = stream(i);
			auto op = static_cast<Op>(i.op);

			switch (op)
			{
			case OpLoad:
			case OpInBoundsAccessChain:
			case OpAccessChain:
			case OpPtrAccessChain:
			case OpArrayLength:
			{
				uint32_t base_id = ops[2];
				if (global_var_ids.find(base_id) != global_var_ids.end())
					added_arg_ids.insert(base_id);

				auto &type = get<SPIRType>(ops[0]);
				if (type.basetype == SPIRType::Image && type.image.dim == DimSubpassData)
				{
					// Implicitly reads gl_FragCoord.
					assert(builtin_frag_coord_id != 0);
					added_arg_ids.insert(builtin_frag_coord_id);
				}

				break;
			}

			case OpFunctionCall:
			{
				// First see if any of the function call args are globals
				for (uint32_t arg_idx = 3; arg_idx < i.length; arg_idx++)
				{
					uint32_t arg_id = ops[arg_idx];
					if (global_var_ids.find(arg_id) != global_var_ids.end())
						added_arg_ids.insert(arg_id);
				}

				// Then recurse into the function itself to extract globals used internally in the function
				uint32_t inner_func_id = ops[2];
				std::set<uint32_t> inner_func_args;
				extract_global_variables_from_function(inner_func_id, inner_func_args, global_var_ids,
				                                       processed_func_ids);
				added_arg_ids.insert(inner_func_args.begin(), inner_func_args.end());
				break;
			}

			case OpStore:
			{
				uint32_t base_id = ops[0];
				if (global_var_ids.find(base_id) != global_var_ids.end())
					added_arg_ids.insert(base_id);
				break;
			}

			case OpSelect:
			{
				uint32_t base_id = ops[3];
				if (global_var_ids.find(base_id) != global_var_ids.end())
					added_arg_ids.insert(base_id);
				base_id = ops[4];
				if (global_var_ids.find(base_id) != global_var_ids.end())
					added_arg_ids.insert(base_id);
				break;
			}

			default:
				break;
			}

			// TODO: Add all other operations which can affect memory.
			// We should consider a more unified system here to reduce boiler-plate.
			// This kind of analysis is done in several places ...
		}
	}

	function_global_vars[func_id] = added_arg_ids;

	// Add the global variables as arguments to the function
	if (func_id != ir.default_entry_point)
	{
		bool added_in = false;
		bool added_out = false;
		for (uint32_t arg_id : added_arg_ids)
		{
			auto &var = get<SPIRVariable>(arg_id);
			uint32_t type_id = var.basetype;
			auto *p_type = &get<SPIRType>(type_id);
			BuiltIn bi_type = BuiltIn(get_decoration(arg_id, DecorationBuiltIn));

			if (((is_tessellation_shader() && var.storage == StorageClassInput) ||
			     (get_execution_model() == ExecutionModelTessellationControl && var.storage == StorageClassOutput)) &&
			    !(has_decoration(arg_id, DecorationPatch) || is_patch_block(*p_type)) &&
			    (!is_builtin_variable(var) || bi_type == BuiltInPosition || bi_type == BuiltInPointSize ||
			     bi_type == BuiltInClipDistance || bi_type == BuiltInCullDistance ||
			     p_type->basetype == SPIRType::Struct))
			{
				// Tessellation control shaders see inputs and per-vertex outputs as arrays.
				// Similarly, tessellation evaluation shaders see per-vertex inputs as arrays.
				// We collected them into a structure; we must pass the array of this
				// structure to the function.
				std::string name;
				if (var.storage == StorageClassInput)
				{
					if (added_in)
						continue;
					name = input_wg_var_name;
					arg_id = stage_in_ptr_var_id;
					added_in = true;
				}
				else if (var.storage == StorageClassOutput)
				{
					if (added_out)
						continue;
					name = "gl_out";
					arg_id = stage_out_ptr_var_id;
					added_out = true;
				}
				type_id = get<SPIRVariable>(arg_id).basetype;
				p_type = &get<SPIRType>(type_id);
				uint32_t next_id = ir.increase_bound_by(1);
				func.add_parameter(type_id, next_id, true);
				set<SPIRVariable>(next_id, type_id, StorageClassFunction, 0, arg_id);

				set_name(next_id, name);
			}
			else if (is_builtin_variable(var) && p_type->basetype == SPIRType::Struct)
			{
				// Get the pointee type
				type_id = get_pointee_type_id(type_id);
				p_type = &get<SPIRType>(type_id);

				uint32_t mbr_idx = 0;
				for (auto &mbr_type_id : p_type->member_types)
				{
					BuiltIn builtin = BuiltInMax;
					bool is_builtin = is_member_builtin(*p_type, mbr_idx, &builtin);
					if (is_builtin && has_active_builtin(builtin, var.storage))
					{
						// Add an arg variable with the same type and decorations as the member
						uint32_t next_ids = ir.increase_bound_by(2);
						uint32_t ptr_type_id = next_ids + 0;
						uint32_t var_id = next_ids + 1;

						// Make sure we have an actual pointer type,
						// so that we will get the appropriate address space when declaring these builtins.
						auto &ptr = set<SPIRType>(ptr_type_id, get<SPIRType>(mbr_type_id));
						ptr.self = mbr_type_id;
						ptr.storage = var.storage;
						ptr.pointer = true;
						ptr.parent_type = mbr_type_id;

						func.add_parameter(mbr_type_id, var_id, true);
						set<SPIRVariable>(var_id, ptr_type_id, StorageClassFunction);
						ir.meta[var_id].decoration = ir.meta[type_id].members[mbr_idx];
					}
					mbr_idx++;
				}
			}
			else
			{
				uint32_t next_id = ir.increase_bound_by(1);
				func.add_parameter(type_id, next_id, true);
				set<SPIRVariable>(next_id, type_id, StorageClassFunction, 0, arg_id);

				// Ensure the existing variable has a valid name and the new variable has all the same meta info
				set_name(arg_id, ensure_valid_name(to_name(arg_id), "v"));
				ir.meta[next_id] = ir.meta[arg_id];
			}
		}
	}
}
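
// Worked example of the memoization above: if main() calls f() and g(), and
// both of those call h() which reads a storage buffer, h() is traversed once;
// the second caller simply re-reads function_global_vars[h] and inherits the
// same argument set.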

// For all variables that are some form of non-input-output interface block, mark that all the structs
// that are recursively contained within the type referenced by that variable should be packed tightly.
void CompilerMSL::mark_packable_structs()
{
	ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
		if (var.storage != StorageClassFunction && !is_hidden_variable(var))
		{
			auto &type = this->get<SPIRType>(var.basetype);
			if (type.pointer &&
			    (type.storage == StorageClassUniform || type.storage == StorageClassUniformConstant ||
			     type.storage == StorageClassPushConstant || type.storage == StorageClassStorageBuffer) &&
			    (has_decoration(type.self, DecorationBlock) || has_decoration(type.self, DecorationBufferBlock)))
				mark_as_packable(type);
		}
	});
}

// If the specified type is a struct, it and any nested structs
// are marked as packable with the SPIRVCrossDecorationPacked decoration.
void CompilerMSL::mark_as_packable(SPIRType &type)
{
	// If this is not the base type (e.g. it's a pointer or array), tunnel down
	if (type.parent_type)
	{
		mark_as_packable(get<SPIRType>(type.parent_type));
		return;
	}

	if (type.basetype == SPIRType::Struct)
	{
		set_extended_decoration(type.self, SPIRVCrossDecorationPacked);

		// Recurse
		size_t mbr_cnt = type.member_types.size();
		for (uint32_t mbr_idx = 0; mbr_idx < mbr_cnt; mbr_idx++)
		{
			uint32_t mbr_type_id = type.member_types[mbr_idx];
			auto &mbr_type = get<SPIRType>(mbr_type_id);
			mark_as_packable(mbr_type);
			if (mbr_type.type_alias)
			{
				auto &mbr_type_alias = get<SPIRType>(mbr_type.type_alias);
				mark_as_packable(mbr_type_alias);
			}
		}
	}
}
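
// Sketch of what the decoration ultimately buys us: when the SPIR-V buffer
// layout places a member more tightly than MSL's default alignment allows
// (say, a vec3 array with a 12-byte stride), the emitter can later declare it
// with a packed type, e.g.
//
//     packed_float3 positions[64];   // rather than float3, which aligns to 16 bytes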

// If a vertex attribute exists at the location, it is marked as being used by this shader
void CompilerMSL::mark_location_as_used_by_shader(uint32_t location, StorageClass storage)
{
	if ((get_execution_model() == ExecutionModelVertex || is_tessellation_shader()) && (storage == StorageClassInput))
		vtx_attrs_in_use.insert(location);
}

uint32_t CompilerMSL::get_target_components_for_fragment_location(uint32_t location) const
{
	auto itr = fragment_output_components.find(location);
	if (itr == end(fragment_output_components))
		return 4;
	else
		return itr->second;
}
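
// Worked example of the padding this feeds: if the embedder records that
// location 0 carries 4 components while the shader only declares a vec2
// output there, the interface member is widened to float4 and the writeback
// swizzles the vec2 into it (see the padded_output paths below). Locations
// with no entry in the map conservatively default to 4 components.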

uint32_t CompilerMSL::build_extended_vector_type(uint32_t type_id, uint32_t components)
{
	uint32_t new_type_id = ir.increase_bound_by(1);
	auto &type = set<SPIRType>(new_type_id, get<SPIRType>(type_id));
	type.vecsize = components;
	type.self = new_type_id;
	type.parent_type = type_id;
	type.pointer = false;

	return new_type_id;
}
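
// E.g. build_extended_vector_type(<id of a float2 type>, 4) synthesizes a
// fresh float4 type whose parent_type points back at the float2, so later
// swizzle remapping can still recover the original component count.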

void CompilerMSL::add_plain_variable_to_interface_block(StorageClass storage, const string &ib_var_ref,
                                                        SPIRType &ib_type, SPIRVariable &var, bool strip_array)
{
	bool is_builtin = is_builtin_variable(var);
	BuiltIn builtin = BuiltIn(get_decoration(var.self, DecorationBuiltIn));
	bool is_flat = has_decoration(var.self, DecorationFlat);
	bool is_noperspective = has_decoration(var.self, DecorationNoPerspective);
	bool is_centroid = has_decoration(var.self, DecorationCentroid);
	bool is_sample = has_decoration(var.self, DecorationSample);

	// Add a reference to the variable type to the interface struct.
	uint32_t ib_mbr_idx = uint32_t(ib_type.member_types.size());
	uint32_t type_id = ensure_correct_builtin_type(var.basetype, builtin);
	var.basetype = type_id;

	type_id = get_pointee_type_id(var.basetype);
	if (strip_array && is_array(get<SPIRType>(type_id)))
		type_id = get<SPIRType>(type_id).parent_type;
	auto &type = get<SPIRType>(type_id);
	uint32_t target_components = 0;
	uint32_t type_components = type.vecsize;
	bool padded_output = false;

	// Check if we need to pad fragment output to match a certain number of components.
	if (get_decoration_bitset(var.self).get(DecorationLocation) && msl_options.pad_fragment_output_components &&
	    get_entry_point().model == ExecutionModelFragment && storage == StorageClassOutput)
	{
		uint32_t locn = get_decoration(var.self, DecorationLocation);
		target_components = get_target_components_for_fragment_location(locn);
		if (type_components < target_components)
		{
			// Make a new type here.
			type_id = build_extended_vector_type(type_id, target_components);
			padded_output = true;
		}
	}

	ib_type.member_types.push_back(type_id);

	// Give the member a name
	string mbr_name = ensure_valid_name(to_expression(var.self), "m");
	set_member_name(ib_type.self, ib_mbr_idx, mbr_name);

	// Update the original variable reference to include the structure reference
	string qual_var_name = ib_var_ref + "." + mbr_name;
	auto &entry_func = get<SPIRFunction>(ir.default_entry_point);

	if (padded_output)
	{
		entry_func.add_local_variable(var.self);
		vars_needing_early_declaration.push_back(var.self);

		entry_func.fixup_hooks_out.push_back([=, &var]() {
			SPIRType &padded_type = this->get<SPIRType>(type_id);
			statement(qual_var_name, " = ", remap_swizzle(padded_type, type_components, to_name(var.self)), ";");
		});
	}
	else if (!strip_array)
		ir.meta[var.self].decoration.qualified_alias = qual_var_name;

	if (var.storage == StorageClassOutput && var.initializer != 0)
	{
		entry_func.fixup_hooks_in.push_back(
		    [=, &var]() { statement(qual_var_name, " = ", to_expression(var.initializer), ";"); });
	}

	// Copy the variable location from the original variable to the member
	if (get_decoration_bitset(var.self).get(DecorationLocation))
	{
		uint32_t locn = get_decoration(var.self, DecorationLocation);
		if (storage == StorageClassInput && (get_execution_model() == ExecutionModelVertex || is_tessellation_shader()))
		{
			type_id = ensure_correct_attribute_type(var.basetype, locn);
			var.basetype = type_id;
			type_id = get_pointee_type_id(type_id);
			if (strip_array && is_array(get<SPIRType>(type_id)))
				type_id = get<SPIRType>(type_id).parent_type;
			ib_type.member_types[ib_mbr_idx] = type_id;
		}
		set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn);
		mark_location_as_used_by_shader(locn, storage);
	}
	else if (is_builtin && is_tessellation_shader() && vtx_attrs_by_builtin.count(builtin))
	{
		uint32_t locn = vtx_attrs_by_builtin[builtin].location;
		set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn);
		mark_location_as_used_by_shader(locn, storage);
	}

	if (get_decoration_bitset(var.self).get(DecorationComponent))
	{
		uint32_t comp = get_decoration(var.self, DecorationComponent);
		set_member_decoration(ib_type.self, ib_mbr_idx, DecorationComponent, comp);
	}

	if (get_decoration_bitset(var.self).get(DecorationIndex))
	{
		uint32_t index = get_decoration(var.self, DecorationIndex);
		set_member_decoration(ib_type.self, ib_mbr_idx, DecorationIndex, index);
	}

	// Mark the member as builtin if needed
	if (is_builtin)
	{
		set_member_decoration(ib_type.self, ib_mbr_idx, DecorationBuiltIn, builtin);
		if (builtin == BuiltInPosition && storage == StorageClassOutput)
			qual_pos_var_name = qual_var_name;
	}

	// Copy interpolation decorations if needed
	if (is_flat)
		set_member_decoration(ib_type.self, ib_mbr_idx, DecorationFlat);
	if (is_noperspective)
		set_member_decoration(ib_type.self, ib_mbr_idx, DecorationNoPerspective);
	if (is_centroid)
		set_member_decoration(ib_type.self, ib_mbr_idx, DecorationCentroid);
	if (is_sample)
		set_member_decoration(ib_type.self, ib_mbr_idx, DecorationSample);

	set_extended_member_decoration(ib_type.self, ib_mbr_idx, SPIRVCrossDecorationInterfaceOrigID, var.self);
}
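
// Sketch of the result for a plain varying (names illustrative): a vertex
// output declared at location 1 becomes a member of the [[stage_out]] struct,
//
//     float4 vColor [[user(locn1)]];
//
// and all references to the original variable are redirected to
// "<ib_var_ref>.vColor" through the qualified alias set above.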

void CompilerMSL::add_composite_variable_to_interface_block(StorageClass storage, const string &ib_var_ref,
                                                            SPIRType &ib_type, SPIRVariable &var, bool strip_array)
{
	auto &entry_func = get<SPIRFunction>(ir.default_entry_point);
	auto &var_type = strip_array ? get_variable_element_type(var) : get_variable_data_type(var);
	uint32_t elem_cnt = 0;

	if (is_matrix(var_type))
	{
		if (is_array(var_type))
			SPIRV_CROSS_THROW("MSL cannot emit arrays-of-matrices in input and output variables.");

		elem_cnt = var_type.columns;
	}
	else if (is_array(var_type))
	{
		if (var_type.array.size() != 1)
			SPIRV_CROSS_THROW("MSL cannot emit arrays-of-arrays in input and output variables.");

		elem_cnt = to_array_size_literal(var_type);
	}

	bool is_builtin = is_builtin_variable(var);
	BuiltIn builtin = BuiltIn(get_decoration(var.self, DecorationBuiltIn));
	bool is_flat = has_decoration(var.self, DecorationFlat);
	bool is_noperspective = has_decoration(var.self, DecorationNoPerspective);
	bool is_centroid = has_decoration(var.self, DecorationCentroid);
	bool is_sample = has_decoration(var.self, DecorationSample);

	auto *usable_type = &var_type;
	if (usable_type->pointer)
		usable_type = &get<SPIRType>(usable_type->parent_type);
	while (is_array(*usable_type) || is_matrix(*usable_type))
		usable_type = &get<SPIRType>(usable_type->parent_type);

	// If a builtin, force it to have the proper name.
	if (is_builtin)
		set_name(var.self, builtin_to_glsl(builtin, StorageClassFunction));

	entry_func.add_local_variable(var.self);

	// We need to declare the variable early and at entry-point scope.
	vars_needing_early_declaration.push_back(var.self);

	for (uint32_t i = 0; i < elem_cnt; i++)
	{
		// Add a reference to the variable type to the interface struct.
		uint32_t ib_mbr_idx = uint32_t(ib_type.member_types.size());

		uint32_t target_components = 0;
		bool padded_output = false;
		uint32_t type_id = usable_type->self;

		// Check if we need to pad fragment output to match a certain number of components.
		if (get_decoration_bitset(var.self).get(DecorationLocation) && msl_options.pad_fragment_output_components &&
		    get_entry_point().model == ExecutionModelFragment && storage == StorageClassOutput)
		{
			uint32_t locn = get_decoration(var.self, DecorationLocation) + i;
			target_components = get_target_components_for_fragment_location(locn);
			if (usable_type->vecsize < target_components)
			{
				// Make a new type here.
				type_id = build_extended_vector_type(usable_type->self, target_components);
				padded_output = true;
			}
		}

		ib_type.member_types.push_back(get_pointee_type_id(type_id));

		// Give the member a name
		string mbr_name = ensure_valid_name(join(to_expression(var.self), "_", i), "m");
		set_member_name(ib_type.self, ib_mbr_idx, mbr_name);

		// There is no qualified alias since we need to flatten the internal array on return.
		if (get_decoration_bitset(var.self).get(DecorationLocation))
		{
			uint32_t locn = get_decoration(var.self, DecorationLocation) + i;
			if (storage == StorageClassInput &&
			    (get_execution_model() == ExecutionModelVertex || is_tessellation_shader()))
			{
				var.basetype = ensure_correct_attribute_type(var.basetype, locn);
				uint32_t mbr_type_id = ensure_correct_attribute_type(usable_type->self, locn);
				ib_type.member_types[ib_mbr_idx] = mbr_type_id;
			}
			set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn);
			mark_location_as_used_by_shader(locn, storage);
		}
		else if (is_builtin && is_tessellation_shader() && vtx_attrs_by_builtin.count(builtin))
		{
			uint32_t locn = vtx_attrs_by_builtin[builtin].location + i;
			set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn);
			mark_location_as_used_by_shader(locn, storage);
		}

		if (get_decoration_bitset(var.self).get(DecorationIndex))
		{
			uint32_t index = get_decoration(var.self, DecorationIndex);
			set_member_decoration(ib_type.self, ib_mbr_idx, DecorationIndex, index);
		}

		// Copy interpolation decorations if needed
		if (is_flat)
			set_member_decoration(ib_type.self, ib_mbr_idx, DecorationFlat);
		if (is_noperspective)
			set_member_decoration(ib_type.self, ib_mbr_idx, DecorationNoPerspective);
		if (is_centroid)
			set_member_decoration(ib_type.self, ib_mbr_idx, DecorationCentroid);
		if (is_sample)
			set_member_decoration(ib_type.self, ib_mbr_idx, DecorationSample);

		set_extended_member_decoration(ib_type.self, ib_mbr_idx, SPIRVCrossDecorationInterfaceOrigID, var.self);

		if (!strip_array)
		{
			switch (storage)
			{
			case StorageClassInput:
				entry_func.fixup_hooks_in.push_back(
				    [=, &var]() { statement(to_name(var.self), "[", i, "] = ", ib_var_ref, ".", mbr_name, ";"); });
				break;

			case StorageClassOutput:
				entry_func.fixup_hooks_out.push_back([=, &var]() {
					if (padded_output)
					{
						auto &padded_type = this->get<SPIRType>(type_id);
						statement(
						    ib_var_ref, ".", mbr_name, " = ",
						    remap_swizzle(padded_type, usable_type->vecsize, join(to_name(var.self), "[", i, "]")),
						    ";");
					}
					else
						statement(ib_var_ref, ".", mbr_name, " = ", to_name(var.self), "[", i, "];");
				});
				break;

			default:
				break;
			}
		}
	}
}
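
// Sketch of the flattening above (names illustrative): an output
// `vec4 foo[2]` at location 3 cannot be an array member of the stage-out
// struct, so it is split into one member per element,
//
//     float4 foo_0 [[user(locn3)]];
//     float4 foo_1 [[user(locn4)]];
//
// with fixup hooks copying each element between the struct and a local array.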

uint32_t CompilerMSL::get_accumulated_member_location(const SPIRVariable &var, uint32_t mbr_idx, bool strip_array)
{
	auto &type = strip_array ? get_variable_element_type(var) : get_variable_data_type(var);
	uint32_t location = get_decoration(var.self, DecorationLocation);

	for (uint32_t i = 0; i < mbr_idx; i++)
	{
		auto &mbr_type = get<SPIRType>(type.member_types[i]);

		// Start counting from any place we have a new location decoration.
		if (has_member_decoration(type.self, i, DecorationLocation))
			location = get_member_decoration(type.self, i, DecorationLocation);

		uint32_t location_count = 1;

		if (mbr_type.columns > 1)
			location_count = mbr_type.columns;

		if (!mbr_type.array.empty())
			for (uint32_t j = 0; j < uint32_t(mbr_type.array.size()); j++)
				location_count *= to_array_size_literal(mbr_type, j);

		location += location_count;
	}

	return location;
}
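
// Worked example: for a block decorated with location 2 whose members are
//
//     float4 a;       // 1 location   -> a sits at 2
//     float4x4 m;     // 4 columns    -> m sits at 3
//     float2 b[3];    // queried here -> 2 + 1 + 4 = 7
//
// get_accumulated_member_location(var, 2, strip_array) returns 7.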

void CompilerMSL::add_composite_member_variable_to_interface_block(StorageClass storage, const string &ib_var_ref,
                                                                   SPIRType &ib_type, SPIRVariable &var,
                                                                   uint32_t mbr_idx, bool strip_array)
{
	auto &entry_func = get<SPIRFunction>(ir.default_entry_point);
	auto &var_type = strip_array ? get_variable_element_type(var) : get_variable_data_type(var);

	BuiltIn builtin;
	bool is_builtin = is_member_builtin(var_type, mbr_idx, &builtin);
	bool is_flat =
	    has_member_decoration(var_type.self, mbr_idx, DecorationFlat) || has_decoration(var.self, DecorationFlat);
	bool is_noperspective = has_member_decoration(var_type.self, mbr_idx, DecorationNoPerspective) ||
	                        has_decoration(var.self, DecorationNoPerspective);
	bool is_centroid = has_member_decoration(var_type.self, mbr_idx, DecorationCentroid) ||
	                   has_decoration(var.self, DecorationCentroid);
	bool is_sample =
	    has_member_decoration(var_type.self, mbr_idx, DecorationSample) || has_decoration(var.self, DecorationSample);

	uint32_t mbr_type_id = var_type.member_types[mbr_idx];
	auto &mbr_type = get<SPIRType>(mbr_type_id);
	uint32_t elem_cnt = 0;

	if (is_matrix(mbr_type))
	{
		if (is_array(mbr_type))
			SPIRV_CROSS_THROW("MSL cannot emit arrays-of-matrices in input and output variables.");

		elem_cnt = mbr_type.columns;
	}
	else if (is_array(mbr_type))
	{
		if (mbr_type.array.size() != 1)
			SPIRV_CROSS_THROW("MSL cannot emit arrays-of-arrays in input and output variables.");

		elem_cnt = to_array_size_literal(mbr_type);
	}

	auto *usable_type = &mbr_type;
	if (usable_type->pointer)
		usable_type = &get<SPIRType>(usable_type->parent_type);
	while (is_array(*usable_type) || is_matrix(*usable_type))
		usable_type = &get<SPIRType>(usable_type->parent_type);

	for (uint32_t i = 0; i < elem_cnt; i++)
	{
		// Add a reference to the variable type to the interface struct.
		uint32_t ib_mbr_idx = uint32_t(ib_type.member_types.size());
		ib_type.member_types.push_back(usable_type->self);

		// Give the member a name
		string mbr_name = ensure_valid_name(join(to_qualified_member_name(var_type, mbr_idx), "_", i), "m");
		set_member_name(ib_type.self, ib_mbr_idx, mbr_name);

		if (has_member_decoration(var_type.self, mbr_idx, DecorationLocation))
		{
			uint32_t locn = get_member_decoration(var_type.self, mbr_idx, DecorationLocation) + i;
			set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn);
			mark_location_as_used_by_shader(locn, storage);
		}
		else if (has_decoration(var.self, DecorationLocation))
		{
			uint32_t locn = get_accumulated_member_location(var, mbr_idx, strip_array) + i;
			set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn);
			mark_location_as_used_by_shader(locn, storage);
		}
		else if (is_builtin && is_tessellation_shader() && vtx_attrs_by_builtin.count(builtin))
		{
			uint32_t locn = vtx_attrs_by_builtin[builtin].location + i;
			set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn);
			mark_location_as_used_by_shader(locn, storage);
		}

		if (has_member_decoration(var_type.self, mbr_idx, DecorationComponent))
			SPIRV_CROSS_THROW("DecorationComponent on matrices and arrays make little sense.");

		// Copy interpolation decorations if needed
		if (is_flat)
			set_member_decoration(ib_type.self, ib_mbr_idx, DecorationFlat);
		if (is_noperspective)
			set_member_decoration(ib_type.self, ib_mbr_idx, DecorationNoPerspective);
		if (is_centroid)
			set_member_decoration(ib_type.self, ib_mbr_idx, DecorationCentroid);
		if (is_sample)
			set_member_decoration(ib_type.self, ib_mbr_idx, DecorationSample);

		set_extended_member_decoration(ib_type.self, ib_mbr_idx, SPIRVCrossDecorationInterfaceOrigID, var.self);
		set_extended_member_decoration(ib_type.self, ib_mbr_idx, SPIRVCrossDecorationInterfaceMemberIndex, mbr_idx);

		// Unflatten or flatten from [[stage_in]] or [[stage_out]] as appropriate.
		if (!strip_array)
		{
			switch (storage)
			{
			case StorageClassInput:
				entry_func.fixup_hooks_in.push_back([=, &var, &var_type]() {
					statement(to_name(var.self), ".", to_member_name(var_type, mbr_idx), "[", i, "] = ", ib_var_ref,
					          ".", mbr_name, ";");
				});
				break;

			case StorageClassOutput:
				entry_func.fixup_hooks_out.push_back([=, &var, &var_type]() {
					statement(ib_var_ref, ".", mbr_name, " = ", to_name(var.self), ".",
					          to_member_name(var_type, mbr_idx), "[", i, "];");
				});
				break;

			default:
				break;
			}
		}
	}
}

void CompilerMSL::add_plain_member_variable_to_interface_block(StorageClass storage, const string &ib_var_ref,
                                                               SPIRType &ib_type, SPIRVariable &var, uint32_t mbr_idx,
                                                               bool strip_array)
{
	auto &var_type = strip_array ? get_variable_element_type(var) : get_variable_data_type(var);
	auto &entry_func = get<SPIRFunction>(ir.default_entry_point);

	BuiltIn builtin = BuiltInMax;
	bool is_builtin = is_member_builtin(var_type, mbr_idx, &builtin);
	bool is_flat =
	    has_member_decoration(var_type.self, mbr_idx, DecorationFlat) || has_decoration(var.self, DecorationFlat);
	bool is_noperspective = has_member_decoration(var_type.self, mbr_idx, DecorationNoPerspective) ||
	                        has_decoration(var.self, DecorationNoPerspective);
	bool is_centroid = has_member_decoration(var_type.self, mbr_idx, DecorationCentroid) ||
	                   has_decoration(var.self, DecorationCentroid);
	bool is_sample =
	    has_member_decoration(var_type.self, mbr_idx, DecorationSample) || has_decoration(var.self, DecorationSample);

	// Add a reference to the member to the interface struct.
	uint32_t mbr_type_id = var_type.member_types[mbr_idx];
	uint32_t ib_mbr_idx = uint32_t(ib_type.member_types.size());
	mbr_type_id = ensure_correct_builtin_type(mbr_type_id, builtin);
	var_type.member_types[mbr_idx] = mbr_type_id;
	ib_type.member_types.push_back(mbr_type_id);

	// Give the member a name
	string mbr_name = ensure_valid_name(to_qualified_member_name(var_type, mbr_idx), "m");
	set_member_name(ib_type.self, ib_mbr_idx, mbr_name);

	// Update the original variable reference to include the structure reference
	string qual_var_name = ib_var_ref + "." + mbr_name;

	if (is_builtin && !strip_array)
	{
		// For the builtin gl_PerVertex, we cannot treat it as a block anyways,
		// so redirect to qualified name.
		set_member_qualified_name(var_type.self, mbr_idx, qual_var_name);
	}
	else if (!strip_array)
	{
		// Unflatten or flatten from [[stage_in]] or [[stage_out]] as appropriate.
		switch (storage)
		{
		case StorageClassInput:
			entry_func.fixup_hooks_in.push_back([=, &var, &var_type]() {
				statement(to_name(var.self), ".", to_member_name(var_type, mbr_idx), " = ", qual_var_name, ";");
			});
			break;

		case StorageClassOutput:
			entry_func.fixup_hooks_out.push_back([=, &var, &var_type]() {
				statement(qual_var_name, " = ", to_name(var.self), ".", to_member_name(var_type, mbr_idx), ";");
			});
			break;

		default:
			break;
		}
	}

	// Copy the variable location from the original variable to the member
	if (has_member_decoration(var_type.self, mbr_idx, DecorationLocation))
	{
		uint32_t locn = get_member_decoration(var_type.self, mbr_idx, DecorationLocation);
		if (storage == StorageClassInput && (get_execution_model() == ExecutionModelVertex || is_tessellation_shader()))
		{
			mbr_type_id = ensure_correct_attribute_type(mbr_type_id, locn);
			var_type.member_types[mbr_idx] = mbr_type_id;
			ib_type.member_types[ib_mbr_idx] = mbr_type_id;
		}
		set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn);
		mark_location_as_used_by_shader(locn, storage);
	}
	else if (has_decoration(var.self, DecorationLocation))
	{
		// The block itself might have a location and in this case, all members of the block
		// receive incrementing locations.
		uint32_t locn = get_accumulated_member_location(var, mbr_idx, strip_array);
		if (storage == StorageClassInput && (get_execution_model() == ExecutionModelVertex || is_tessellation_shader()))
		{
			mbr_type_id = ensure_correct_attribute_type(mbr_type_id, locn);
			var_type.member_types[mbr_idx] = mbr_type_id;
			ib_type.member_types[ib_mbr_idx] = mbr_type_id;
		}
		set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn);
		mark_location_as_used_by_shader(locn, storage);
	}
	else if (is_builtin && is_tessellation_shader() && vtx_attrs_by_builtin.count(builtin))
	{
		uint32_t locn = 0;
		auto builtin_itr = vtx_attrs_by_builtin.find(builtin);
		if (builtin_itr != end(vtx_attrs_by_builtin))
			locn = builtin_itr->second.location;
		set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn);
		mark_location_as_used_by_shader(locn, storage);
	}

	// Copy the component location, if present.
	if (has_member_decoration(var_type.self, mbr_idx, DecorationComponent))
	{
		uint32_t comp = get_member_decoration(var_type.self, mbr_idx, DecorationComponent);
		set_member_decoration(ib_type.self, ib_mbr_idx, DecorationComponent, comp);
	}

	// Mark the member as builtin if needed
	if (is_builtin)
	{
		set_member_decoration(ib_type.self, ib_mbr_idx, DecorationBuiltIn, builtin);
		if (builtin == BuiltInPosition && storage == StorageClassOutput)
			qual_pos_var_name = qual_var_name;
	}

	// Copy interpolation decorations if needed
	if (is_flat)
		set_member_decoration(ib_type.self, ib_mbr_idx, DecorationFlat);
	if (is_noperspective)
		set_member_decoration(ib_type.self, ib_mbr_idx, DecorationNoPerspective);
	if (is_centroid)
		set_member_decoration(ib_type.self, ib_mbr_idx, DecorationCentroid);
	if (is_sample)
		set_member_decoration(ib_type.self, ib_mbr_idx, DecorationSample);

	set_extended_member_decoration(ib_type.self, ib_mbr_idx, SPIRVCrossDecorationInterfaceOrigID, var.self);
	set_extended_member_decoration(ib_type.self, ib_mbr_idx, SPIRVCrossDecorationInterfaceMemberIndex, mbr_idx);
}
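
// Sketch: for the gl_PerVertex output block this means gl_Position is never
// accessed through a block at all; writes are redirected to the qualified
// member name, so the emitted MSL reads along the lines of
//
//     out.gl_Position = ...;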
// In Metal, the tessellation levels are stored as tightly packed half-precision floating point values.
|
|
|
|
// But, stage-in attribute offsets and strides must be multiples of four, so we can't pass the levels
|
|
|
|
// individually. Therefore, we must pass them as vectors. Triangles get a single float4, with the outer
|
|
|
|
// levels in 'xyz' and the inner level in 'w'. Quads get a float4 containing the outer levels and a
|
|
|
|
// float2 containing the inner levels.
void CompilerMSL::add_tess_level_input_to_interface_block(const std::string &ib_var_ref, SPIRType &ib_type,
                                                          SPIRVariable &var)
{
	auto &entry_func = get<SPIRFunction>(ir.default_entry_point);
	auto &var_type = get_variable_element_type(var);

	BuiltIn builtin = BuiltIn(get_decoration(var.self, DecorationBuiltIn));

	// Force the variable to have the proper name.
	set_name(var.self, builtin_to_glsl(builtin, StorageClassFunction));

	if (get_entry_point().flags.get(ExecutionModeTriangles))
	{
		// Triangles are tricky, because we want only one member in the struct.

		// We need to declare the variable early and at entry-point scope.
		entry_func.add_local_variable(var.self);
		vars_needing_early_declaration.push_back(var.self);

		string mbr_name = "gl_TessLevel";

		// If we already added the other one, we can skip this step.
		if (!added_builtin_tess_level)
		{
			// Add a reference to the variable type to the interface struct.
			uint32_t ib_mbr_idx = uint32_t(ib_type.member_types.size());

			uint32_t type_id = build_extended_vector_type(var_type.self, 4);

			ib_type.member_types.push_back(type_id);

			// Give the member a name.
			set_member_name(ib_type.self, ib_mbr_idx, mbr_name);

			// There is no qualified alias since we need to flatten the internal array on return.
			if (get_decoration_bitset(var.self).get(DecorationLocation))
			{
				uint32_t locn = get_decoration(var.self, DecorationLocation);
				set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn);
				mark_location_as_used_by_shader(locn, StorageClassInput);
			}
			else if (vtx_attrs_by_builtin.count(builtin))
			{
				uint32_t locn = vtx_attrs_by_builtin[builtin].location;
				set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn);
				mark_location_as_used_by_shader(locn, StorageClassInput);
			}

			added_builtin_tess_level = true;
		}

		switch (builtin)
		{
		case BuiltInTessLevelOuter:
			entry_func.fixup_hooks_in.push_back([=, &var]() {
				statement(to_name(var.self), "[0] = ", ib_var_ref, ".", mbr_name, ".x;");
				statement(to_name(var.self), "[1] = ", ib_var_ref, ".", mbr_name, ".y;");
				statement(to_name(var.self), "[2] = ", ib_var_ref, ".", mbr_name, ".z;");
			});
			break;

		case BuiltInTessLevelInner:
			entry_func.fixup_hooks_in.push_back(
			    [=, &var]() { statement(to_name(var.self), "[0] = ", ib_var_ref, ".", mbr_name, ".w;"); });
			break;

		default:
			assert(false);
			break;
		}
	}
	else
	{
		// Add a reference to the variable type to the interface struct.
		uint32_t ib_mbr_idx = uint32_t(ib_type.member_types.size());

		uint32_t type_id = build_extended_vector_type(var_type.self, builtin == BuiltInTessLevelOuter ? 4 : 2);
		// Change the type of the variable, too.
		uint32_t ptr_type_id = ir.increase_bound_by(1);
		auto &new_var_type = set<SPIRType>(ptr_type_id, get<SPIRType>(type_id));
		new_var_type.pointer = true;
		new_var_type.storage = StorageClassInput;
		new_var_type.parent_type = type_id;
		var.basetype = ptr_type_id;

		ib_type.member_types.push_back(type_id);

		// Give the member a name.
		string mbr_name = to_expression(var.self);
		set_member_name(ib_type.self, ib_mbr_idx, mbr_name);

		// Since vectors can be indexed like arrays, there is no need to unpack this. We can
		// just refer to the vector directly. So give it a qualified alias.
		string qual_var_name = ib_var_ref + "." + mbr_name;
		ir.meta[var.self].decoration.qualified_alias = qual_var_name;

		if (get_decoration_bitset(var.self).get(DecorationLocation))
		{
			uint32_t locn = get_decoration(var.self, DecorationLocation);
			set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn);
			mark_location_as_used_by_shader(locn, StorageClassInput);
		}
		else if (vtx_attrs_by_builtin.count(builtin))
		{
			uint32_t locn = vtx_attrs_by_builtin[builtin].location;
			set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn);
			mark_location_as_used_by_shader(locn, StorageClassInput);
		}
	}
}

void CompilerMSL::add_variable_to_interface_block(StorageClass storage, const string &ib_var_ref, SPIRType &ib_type,
                                                  SPIRVariable &var, bool strip_array)
{
	auto &entry_func = get<SPIRFunction>(ir.default_entry_point);
	// Tessellation control I/O variables and tessellation evaluation per-point inputs are
	// usually declared as arrays. In these cases, we want to add the element type to the
	// interface block, since in Metal it's the interface block itself which is arrayed.
	auto &var_type = strip_array ? get_variable_element_type(var) : get_variable_data_type(var);
	bool is_builtin = is_builtin_variable(var);
	auto builtin = BuiltIn(get_decoration(var.self, DecorationBuiltIn));

	if (var_type.basetype == SPIRType::Struct)
	{
		if (!is_builtin_type(var_type) && (!capture_output_to_buffer || storage == StorageClassInput) && !strip_array)
		{
			// For I/O blocks or structs, we will need to pass the block itself around
			// to functions if they are used globally in leaf functions.
			// Rather than passing down member by member,
			// we unflatten I/O blocks while running the shader,
			// and pass the actual struct type down to leaf functions.
			// We then unflatten inputs, and flatten outputs in the "fixup" stages.
			entry_func.add_local_variable(var.self);
			vars_needing_early_declaration.push_back(var.self);
		}

		if (capture_output_to_buffer && storage != StorageClassInput && !has_decoration(var_type.self, DecorationBlock))
		{
			// In Metal tessellation shaders, the interface block itself is arrayed. This makes things
			// very complicated, since stage-in structures in MSL don't support nested structures.
			// Luckily, for stage-out when capturing output, we can avoid this and just add
			// composite members directly, because the stage-out structure is stored to a buffer,
			// not returned.
			add_plain_variable_to_interface_block(storage, ib_var_ref, ib_type, var, strip_array);
		}
		else
		{
			// Flatten the struct members into the interface struct.
			for (uint32_t mbr_idx = 0; mbr_idx < uint32_t(var_type.member_types.size()); mbr_idx++)
			{
				builtin = BuiltInMax;
				is_builtin = is_member_builtin(var_type, mbr_idx, &builtin);
				auto &mbr_type = get<SPIRType>(var_type.member_types[mbr_idx]);

				if (!is_builtin || has_active_builtin(builtin, storage))
				{
					if ((!is_builtin ||
					     (storage == StorageClassInput && get_execution_model() != ExecutionModelFragment)) &&
					    (storage == StorageClassInput || storage == StorageClassOutput) &&
					    (is_matrix(mbr_type) || is_array(mbr_type)))
					{
						add_composite_member_variable_to_interface_block(storage, ib_var_ref, ib_type, var, mbr_idx,
						                                                 strip_array);
					}
					else
					{
						add_plain_member_variable_to_interface_block(storage, ib_var_ref, ib_type, var, mbr_idx,
						                                             strip_array);
					}
				}
			}
		}
	}
	else if (get_execution_model() == ExecutionModelTessellationEvaluation && storage == StorageClassInput &&
	         !strip_array && is_builtin && (builtin == BuiltInTessLevelOuter || builtin == BuiltInTessLevelInner))
	{
		add_tess_level_input_to_interface_block(ib_var_ref, ib_type, var);
	}
	else if (var_type.basetype == SPIRType::Boolean || var_type.basetype == SPIRType::Char ||
	         type_is_integral(var_type) || type_is_floating_point(var_type))
	{
		if (!is_builtin || has_active_builtin(builtin, storage))
		{
			// MSL does not allow matrices or arrays in input or output variables, so we must handle those specially.
			if ((!is_builtin || (storage == StorageClassInput && get_execution_model() != ExecutionModelFragment)) &&
			    (storage == StorageClassInput || (storage == StorageClassOutput && !capture_output_to_buffer)) &&
			    (is_matrix(var_type) || is_array(var_type)))
			{
				add_composite_variable_to_interface_block(storage, ib_var_ref, ib_type, var, strip_array);
			}
			else
			{
				add_plain_variable_to_interface_block(storage, ib_var_ref, ib_type, var, strip_array);
			}
		}
	}
}

// Fix up the mapping of variables to interface member indices, which is used to compile access chains
// for per-vertex variables in a tessellation control shader.
void CompilerMSL::fix_up_interface_member_indices(StorageClass storage, uint32_t ib_type_id)
{
	// Only needed for tessellation shaders.
	if (get_execution_model() != ExecutionModelTessellationControl &&
	    !(get_execution_model() == ExecutionModelTessellationEvaluation && storage == StorageClassInput))
		return;

	bool in_array = false;
	for (uint32_t i = 0; i < ir.meta[ib_type_id].members.size(); i++)
	{
		auto &mbr_dec = ir.meta[ib_type_id].members[i];
		uint32_t var_id = mbr_dec.extended.ib_orig_id;
		if (!var_id)
			continue;
		auto &var = get<SPIRVariable>(var_id);

		// Unfortunately, all this complexity is needed to handle flattened structs and/or
		// arrays.
		if (storage == StorageClassInput)
		{
			auto &type = get_variable_element_type(var);
			if (is_array(type) || is_matrix(type))
			{
				if (in_array)
					continue;
				in_array = true;
				set_extended_decoration(var_id, SPIRVCrossDecorationInterfaceMemberIndex, i);
			}
			else
			{
				if (type.basetype == SPIRType::Struct)
				{
					uint32_t mbr_idx =
					    get_extended_member_decoration(ib_type_id, i, SPIRVCrossDecorationInterfaceMemberIndex);
					auto &mbr_type = get<SPIRType>(type.member_types[mbr_idx]);

					if (is_array(mbr_type) || is_matrix(mbr_type))
					{
						if (in_array)
							continue;
						in_array = true;
						set_extended_member_decoration(var_id, mbr_idx, SPIRVCrossDecorationInterfaceMemberIndex, i);
					}
					else
					{
						in_array = false;
						set_extended_member_decoration(var_id, mbr_idx, SPIRVCrossDecorationInterfaceMemberIndex, i);
					}
				}
				else
				{
					in_array = false;
					set_extended_decoration(var_id, SPIRVCrossDecorationInterfaceMemberIndex, i);
				}
			}
		}
		else
			set_extended_decoration(var_id, SPIRVCrossDecorationInterfaceMemberIndex, i);
	}
}

// Add an interface structure for the type of storage, which is either StorageClassInput or StorageClassOutput.
// Returns the ID of the newly added variable, or zero if no variable was added.
uint32_t CompilerMSL::add_interface_block(StorageClass storage, bool patch)
{
	// Accumulate the variables that should appear in the interface struct.
	SmallVector<SPIRVariable *> vars;
	bool incl_builtins = storage == StorageClassOutput || is_tessellation_shader();
	bool has_seen_barycentric = false;

	ir.for_each_typed_id<SPIRVariable>([&](uint32_t var_id, SPIRVariable &var) {
		if (var.storage != storage)
			return;

		auto &type = this->get<SPIRType>(var.basetype);

		bool is_builtin = is_builtin_variable(var);
		auto bi_type = BuiltIn(get_decoration(var_id, DecorationBuiltIn));

		// These builtins are part of the stage in/out structs.
		bool is_interface_block_builtin =
		    (bi_type == BuiltInPosition || bi_type == BuiltInPointSize || bi_type == BuiltInClipDistance ||
		     bi_type == BuiltInCullDistance || bi_type == BuiltInLayer || bi_type == BuiltInViewportIndex ||
		     bi_type == BuiltInBaryCoordNV || bi_type == BuiltInBaryCoordNoPerspNV ||
		     bi_type == BuiltInFragDepth || bi_type == BuiltInFragStencilRefEXT || bi_type == BuiltInSampleMask) ||
		    (get_execution_model() == ExecutionModelTessellationEvaluation &&
		     (bi_type == BuiltInTessLevelOuter || bi_type == BuiltInTessLevelInner));

		bool is_active = interface_variable_exists_in_entry_point(var.self);
		if (is_builtin && is_active)
		{
			// Only emit the builtin if it's active in this entry point. The interface variable list might lie.
			is_active = has_active_builtin(bi_type, storage);
		}

		bool filter_patch_decoration = (has_decoration(var_id, DecorationPatch) || is_patch_block(type)) == patch;

		bool hidden = is_hidden_variable(var, incl_builtins);
		// Barycentric inputs must be emitted in stage-in, because they can have interpolation arguments.
		if (is_active && (bi_type == BuiltInBaryCoordNV || bi_type == BuiltInBaryCoordNoPerspNV))
		{
			if (has_seen_barycentric)
				SPIRV_CROSS_THROW("Cannot declare both BaryCoordNV and BaryCoordNoPerspNV in same shader in MSL.");
			has_seen_barycentric = true;
			hidden = false;
		}

		if (is_active && !hidden && type.pointer && filter_patch_decoration &&
		    (!is_builtin || is_interface_block_builtin))
		{
			vars.push_back(&var);
		}
	});

	// If no variables qualify, leave.
	// For patch input in a tessellation evaluation shader, the per-vertex stage inputs
	// are included in a special patch control point array.
	if (vars.empty() && !(storage == StorageClassInput && patch && stage_in_var_id))
		return 0;

	// Add a new typed variable for this interface structure.
	// The initializer expression is allocated here, but populated when the function
	// declaration is emitted, because it is cleared after each compilation pass.
	uint32_t next_id = ir.increase_bound_by(3);
	uint32_t ib_type_id = next_id++;
	auto &ib_type = set<SPIRType>(ib_type_id);
	ib_type.basetype = SPIRType::Struct;
	ib_type.storage = storage;
	set_decoration(ib_type_id, DecorationBlock);

	uint32_t ib_var_id = next_id++;
	auto &var = set<SPIRVariable>(ib_var_id, ib_type_id, storage, 0);
	var.initializer = next_id++;

	string ib_var_ref;
	auto &entry_func = get<SPIRFunction>(ir.default_entry_point);
	switch (storage)
	{
	case StorageClassInput:
		ib_var_ref = patch ? patch_stage_in_var_name : stage_in_var_name;
		if (get_execution_model() == ExecutionModelTessellationControl)
		{
			// Add a hook to populate the shared workgroup memory containing
			// the gl_in array.
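			// The emitted hook looks roughly like this (variable names are illustrative):
			//     if (gl_InvocationID < spvIndirectParams[0])
			//         gl_in[gl_InvocationID] = in;
			//     threadgroup_barrier(mem_flags::mem_threadgroup);
			//     if (gl_InvocationID >= <output_vertices>)
			//         return;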
			entry_func.fixup_hooks_in.push_back([=]() {
				// Can't use PatchVertices yet; the hook for that may not have run yet.
				statement("if (", to_expression(builtin_invocation_id_id), " < ", "spvIndirectParams[0])");
				statement(" ", input_wg_var_name, "[", to_expression(builtin_invocation_id_id), "] = ", ib_var_ref,
				          ";");
				statement("threadgroup_barrier(mem_flags::mem_threadgroup);");
				statement("if (", to_expression(builtin_invocation_id_id), " >= ", get_entry_point().output_vertices,
				          ")");
				statement(" return;");
			});
		}
		break;

	case StorageClassOutput:
	{
		ib_var_ref = patch ? patch_stage_out_var_name : stage_out_var_name;

		// Add the output interface struct as a local variable to the entry function.
		// If the entry point should return the output struct, set the entry function
		// to return the output interface struct, otherwise to return nothing.
		// Indicate the output var requires early initialization.
		bool ep_should_return_output = !get_is_rasterization_disabled();
		uint32_t rtn_id = ep_should_return_output ? ib_var_id : 0;
		if (!capture_output_to_buffer)
		{
			entry_func.add_local_variable(ib_var_id);
			for (auto &blk_id : entry_func.blocks)
			{
				auto &blk = get<SPIRBlock>(blk_id);
				if (blk.terminator == SPIRBlock::Return)
					blk.return_value = rtn_id;
			}
			vars_needing_early_declaration.push_back(ib_var_id);
		}
		else
		{
			switch (get_execution_model())
			{
			case ExecutionModelVertex:
			case ExecutionModelTessellationEvaluation:
				// Instead of declaring a struct variable to hold the output and then
				// copying that to the output buffer, we'll declare the output variable
				// as a reference to the final output element in the buffer. Then we can
				// avoid the extra copy.
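				// e.g. (names illustrative) the hook emits something like:
				//     device main0_out& out = spvOut[(gl_InstanceIndex - gl_BaseInstance) * spvIndirectParams[0] +
				//                                    gl_VertexIndex - gl_BaseVertex];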
				entry_func.fixup_hooks_in.push_back([=]() {
					if (stage_out_var_id)
					{
						// The first member of the indirect buffer is always the number of vertices
						// to draw.
						statement("device ", to_name(ir.default_entry_point), "_", ib_var_ref, "& ", ib_var_ref, " = ",
						          output_buffer_var_name, "[(", to_expression(builtin_instance_idx_id), " - ",
						          to_expression(builtin_base_instance_id), ") * spvIndirectParams[0] + ",
						          to_expression(builtin_vertex_idx_id), " - ", to_expression(builtin_base_vertex_id),
						          "];");
					}
				});
				break;
			case ExecutionModelTessellationControl:
				if (patch)
					entry_func.fixup_hooks_in.push_back([=]() {
						statement("device ", to_name(ir.default_entry_point), "_", ib_var_ref, "& ", ib_var_ref, " = ",
						          patch_output_buffer_var_name, "[", to_expression(builtin_primitive_id_id), "];");
					});
				else
					entry_func.fixup_hooks_in.push_back([=]() {
						statement("device ", to_name(ir.default_entry_point), "_", ib_var_ref, "* gl_out = &",
						          output_buffer_var_name, "[", to_expression(builtin_primitive_id_id), " * ",
						          get_entry_point().output_vertices, "];");
					});
				break;
			default:
				break;
			}
		}
		break;
	}

	default:
		break;
	}

	set_name(ib_type_id, to_name(ir.default_entry_point) + "_" + ib_var_ref);
	set_name(ib_var_id, ib_var_ref);

	for (auto *p_var : vars)
	{
		bool strip_array =
		    (get_execution_model() == ExecutionModelTessellationControl ||
		     (get_execution_model() == ExecutionModelTessellationEvaluation && storage == StorageClassInput)) &&
		    !patch;
		add_variable_to_interface_block(storage, ib_var_ref, ib_type, *p_var, strip_array);
	}

	// Sort the members of the structure by their locations.
	MemberSorter member_sorter(ib_type, ir.meta[ib_type_id], MemberSorter::Location);
	member_sorter.sort();

	// The member indices were saved to the original variables, but after the members
	// were sorted, those indices are now likely incorrect. Fix those up now.
	if (!patch)
		fix_up_interface_member_indices(storage, ib_type_id);

	// For patch inputs, add one more member, holding the array of control point data.
	if (get_execution_model() == ExecutionModelTessellationEvaluation && storage == StorageClassInput && patch &&
	    stage_in_var_id)
	{
		uint32_t pcp_type_id = ir.increase_bound_by(1);
		auto &pcp_type = set<SPIRType>(pcp_type_id, ib_type);
		pcp_type.basetype = SPIRType::ControlPointArray;
		pcp_type.parent_type = pcp_type.type_alias = get_stage_in_struct_type().self;
		pcp_type.storage = storage;
		ir.meta[pcp_type_id] = ir.meta[ib_type.self];
		uint32_t mbr_idx = uint32_t(ib_type.member_types.size());
		ib_type.member_types.push_back(pcp_type_id);
		set_member_name(ib_type.self, mbr_idx, "gl_in");
	}

	return ib_var_id;
}

uint32_t CompilerMSL::add_interface_block_pointer(uint32_t ib_var_id, StorageClass storage)
{
	if (!ib_var_id)
		return 0;

	uint32_t ib_ptr_var_id;
	uint32_t next_id = ir.increase_bound_by(3);
	auto &ib_type = expression_type(ib_var_id);
	if (get_execution_model() == ExecutionModelTessellationControl)
	{
		// Tessellation control per-vertex I/O is presented as an array, so we must
		// do the same with our struct here.
		uint32_t ib_ptr_type_id = next_id++;
		auto &ib_ptr_type = set<SPIRType>(ib_ptr_type_id, ib_type);
		ib_ptr_type.parent_type = ib_ptr_type.type_alias = ib_type.self;
		ib_ptr_type.pointer = true;
		ib_ptr_type.storage = storage == StorageClassInput ? StorageClassWorkgroup : StorageClassStorageBuffer;
		ir.meta[ib_ptr_type_id] = ir.meta[ib_type.self];
		// To ensure that get_variable_data_type() doesn't strip off the pointer,
		// which we need, use another pointer.
		uint32_t ib_ptr_ptr_type_id = next_id++;
		auto &ib_ptr_ptr_type = set<SPIRType>(ib_ptr_ptr_type_id, ib_ptr_type);
		ib_ptr_ptr_type.parent_type = ib_ptr_type_id;
		ib_ptr_ptr_type.type_alias = ib_type.self;
		ib_ptr_ptr_type.storage = StorageClassFunction;
		ir.meta[ib_ptr_ptr_type_id] = ir.meta[ib_type.self];

		ib_ptr_var_id = next_id;
		set<SPIRVariable>(ib_ptr_var_id, ib_ptr_ptr_type_id, StorageClassFunction, 0);
		set_name(ib_ptr_var_id, storage == StorageClassInput ? input_wg_var_name : "gl_out");
	}
	else
	{
		// Tessellation evaluation per-vertex inputs are also presented as arrays.
		// But, in Metal, this array uses a very special type, 'patch_control_point<T>',
		// which is a container that can be used to access the control point data.
		// To represent this, a special 'ControlPointArray' type has been added to the
		// SPIRV-Cross type system. It should only be generated by and seen in the MSL
		// backend (i.e. this one).
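		// In the generated MSL this surfaces as, e.g. (struct name illustrative):
		//     patch_control_point<main0_in> gl_in;
		// embedded in the patch-level stage-in struct.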
		uint32_t pcp_type_id = next_id++;
		auto &pcp_type = set<SPIRType>(pcp_type_id, ib_type);
		pcp_type.basetype = SPIRType::ControlPointArray;
		pcp_type.parent_type = pcp_type.type_alias = ib_type.self;
		pcp_type.storage = storage;
		ir.meta[pcp_type_id] = ir.meta[ib_type.self];

		ib_ptr_var_id = next_id;
		set<SPIRVariable>(ib_ptr_var_id, pcp_type_id, storage, 0);
		set_name(ib_ptr_var_id, "gl_in");
		ir.meta[ib_ptr_var_id].decoration.qualified_alias = join(patch_stage_in_var_name, ".gl_in");
	}
	return ib_ptr_var_id;
}

// Ensure that the type is compatible with the builtin.
// If it is, simply return the given type ID.
// Otherwise, create a new type, and return its ID.
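// For example, gl_Layer is declared as a signed int in GLSL/SPIR-V, but the corresponding MSL
// output attribute expects an unsigned type, so the type is rewritten to uint here.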
uint32_t CompilerMSL::ensure_correct_builtin_type(uint32_t type_id, BuiltIn builtin)
{
	auto &type = get<SPIRType>(type_id);

	if ((builtin == BuiltInSampleMask && is_array(type)) ||
	    ((builtin == BuiltInLayer || builtin == BuiltInViewportIndex || builtin == BuiltInFragStencilRefEXT) &&
	     type.basetype != SPIRType::UInt))
	{
		uint32_t next_id = ir.increase_bound_by(type.pointer ? 2 : 1);
		uint32_t base_type_id = next_id++;
		auto &base_type = set<SPIRType>(base_type_id);
		base_type.basetype = SPIRType::UInt;
		base_type.width = 32;

		if (!type.pointer)
			return base_type_id;

		uint32_t ptr_type_id = next_id++;
		auto &ptr_type = set<SPIRType>(ptr_type_id);
		ptr_type = base_type;
		ptr_type.pointer = true;
		ptr_type.storage = type.storage;
		ptr_type.parent_type = base_type_id;
		return ptr_type_id;
	}

	return type_id;
}

// Ensure that the type is compatible with the vertex attribute.
// If it is, simply return the given type ID.
// Otherwise, create a new type, and return its ID.
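// For example, if the host binds a UINT8 vertex format to a location the shader reads as a signed
// short or int, the attribute type is rewritten to the matching unsigned type (ushort or uint) so
// Metal can perform the fetch; base types that are genuinely incompatible throw a mismatch error.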
uint32_t CompilerMSL::ensure_correct_attribute_type(uint32_t type_id, uint32_t location)
{
	auto &type = get<SPIRType>(type_id);

	auto p_va = vtx_attrs_by_location.find(location);
	if (p_va == end(vtx_attrs_by_location))
		return type_id;

	switch (p_va->second.format)
	{
	case MSL_VERTEX_FORMAT_UINT8:
	{
		switch (type.basetype)
		{
		case SPIRType::UByte:
		case SPIRType::UShort:
		case SPIRType::UInt:
			return type_id;
		case SPIRType::Short:
		case SPIRType::Int:
			break;
		default:
			SPIRV_CROSS_THROW("Vertex attribute type mismatch between host and shader");
		}
		uint32_t next_id = ir.increase_bound_by(type.pointer ? 2 : 1);
		uint32_t base_type_id = next_id++;
		auto &base_type = set<SPIRType>(base_type_id);
		base_type = type;
		base_type.basetype = type.basetype == SPIRType::Short ? SPIRType::UShort : SPIRType::UInt;
		base_type.pointer = false;

		if (!type.pointer)
			return base_type_id;

		uint32_t ptr_type_id = next_id++;
		auto &ptr_type = set<SPIRType>(ptr_type_id);
		ptr_type = base_type;
		ptr_type.pointer = true;
		ptr_type.storage = type.storage;
		ptr_type.parent_type = base_type_id;
		return ptr_type_id;
	}

	case MSL_VERTEX_FORMAT_UINT16:
	{
		switch (type.basetype)
		{
		case SPIRType::UShort:
		case SPIRType::UInt:
			return type_id;
		case SPIRType::Int:
			break;
		default:
			SPIRV_CROSS_THROW("Vertex attribute type mismatch between host and shader");
		}
		uint32_t next_id = ir.increase_bound_by(type.pointer ? 2 : 1);
		uint32_t base_type_id = next_id++;
		auto &base_type = set<SPIRType>(base_type_id);
		base_type = type;
		base_type.basetype = SPIRType::UInt;
		base_type.pointer = false;

		if (!type.pointer)
			return base_type_id;

		uint32_t ptr_type_id = next_id++;
		auto &ptr_type = set<SPIRType>(ptr_type_id);
		ptr_type = base_type;
		ptr_type.pointer = true;
		ptr_type.storage = type.storage;
		ptr_type.parent_type = base_type_id;
		return ptr_type_id;
	}

	default:
	case MSL_VERTEX_FORMAT_OTHER:
		break;
	}

	return type_id;
}

// Sort the members of the struct type by offset, and pack and then pad members where needed
// to align MSL members with SPIR-V offsets. The struct members are iterated twice. Packing
// occurs first, followed by padding, because packing a member reduces both its size and its
// natural alignment, possibly requiring a padding member to be added ahead of it.
void CompilerMSL::align_struct(SPIRType &ib_type)
{
	uint32_t &ib_type_id = ib_type.self;

	// Sort the members of the interface structure by their offset.
	// They should already be sorted per SPIR-V spec anyway.
	MemberSorter member_sorter(ib_type, ir.meta[ib_type_id], MemberSorter::Offset);
	member_sorter.sort();

	uint32_t mbr_cnt = uint32_t(ib_type.member_types.size());

	// Test the alignment of each member, and if a member should be closer to the previous
	// member than the default spacing expects, it is likely that the previous member is in
	// a packed format. If so, and the previous member is packable, pack it.
	// For example, this applies to any 3-element vector that is followed by a scalar.
	uint32_t curr_offset = 0;
	for (uint32_t mbr_idx = 0; mbr_idx < mbr_cnt; mbr_idx++)
	{
		if (is_member_packable(ib_type, mbr_idx))
		{
			set_extended_member_decoration(ib_type_id, mbr_idx, SPIRVCrossDecorationPacked);
			set_extended_member_decoration(ib_type_id, mbr_idx, SPIRVCrossDecorationPackedType,
			                               ib_type.member_types[mbr_idx]);
		}

		// Align current offset to the current member's default alignment.
		size_t align_mask = get_declared_struct_member_alignment(ib_type, mbr_idx) - 1;
		uint32_t aligned_curr_offset = uint32_t((curr_offset + align_mask) & ~align_mask);
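		// e.g. with curr_offset == 13 and a member alignment of 8, align_mask == 7 and
		// aligned_curr_offset == 16.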

		// Fetch the member offset as declared in the SPIR-V.
		uint32_t mbr_offset = get_member_decoration(ib_type_id, mbr_idx, DecorationOffset);
		if (mbr_offset > aligned_curr_offset)
		{
			// Since MSL and SPIR-V have slightly different struct member alignment and
			// size rules, we'll pad to standard C-packing rules. If the member is farther
			// away than C-packing expects, add an inert padding member before the member.
			MSLStructMemberKey key = get_struct_member_key(ib_type_id, mbr_idx);
			struct_member_padding[key] = mbr_offset - curr_offset;
		}

		// Increment the current offset to be positioned immediately after the current member.
		// Don't do this for the last member since it can be unsized, and it is not relevant for padding purposes here.
		if (mbr_idx + 1 < mbr_cnt)
			curr_offset = mbr_offset + uint32_t(get_declared_struct_member_size(ib_type, mbr_idx));
	}
}

// Returns whether the specified struct member supports a packable type
// variation that is smaller than the unpacked variation of that type.
bool CompilerMSL::is_member_packable(SPIRType &ib_type, uint32_t index)
{
	// We've already marked it as packable.
	if (has_extended_member_decoration(ib_type.self, index, SPIRVCrossDecorationPacked))
		return true;

	auto &mbr_type = get<SPIRType>(ib_type.member_types[index]);

	uint32_t component_size = mbr_type.width / 8;
	uint32_t unpacked_mbr_size;
	if (mbr_type.vecsize == 3)
		unpacked_mbr_size = component_size * (mbr_type.vecsize + 1) * mbr_type.columns;
	else
		unpacked_mbr_size = component_size * mbr_type.vecsize * mbr_type.columns;
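	// A 3-element vector is rounded up to 4 components when unpacked: e.g. an unpacked float3
	// occupies 16 bytes, while a packed float3 occupies only 12.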

	// Special case for packing. Check for float[] or vec2[] in std140 layout. Here we actually need to pad out instead,
	// but we will use the same mechanism.
	if (is_array(mbr_type) && (is_scalar(mbr_type) || is_vector(mbr_type)) && mbr_type.vecsize <= 2 &&
	    type_struct_member_array_stride(ib_type, index) == 4 * component_size)
	{
		return true;
	}

	// Check for array of struct, where the SPIR-V declares an array stride which is larger than the struct itself.
	// This can happen for struct A { float a; }; A a[]; in std140 layout.
	// TODO: Emit a padded struct which can be used for this purpose.
	if (is_array(mbr_type) && mbr_type.basetype == SPIRType::Struct)
	{
		size_t declared_struct_size = get_declared_struct_size(mbr_type);
		size_t alignment = get_declared_struct_member_alignment(ib_type, index);
		declared_struct_size = (declared_struct_size + alignment - 1) & ~(alignment - 1);
		if (type_struct_member_array_stride(ib_type, index) > declared_struct_size)
			return true;
	}

	// TODO: Another sanity check for matrices. We currently do not support std140 matrices which need to be padded out per column.
	//if (is_matrix(mbr_type) && mbr_type.vecsize <= 2 && type_struct_member_matrix_stride(ib_type, index) == 16)
	//	SPIRV_CROSS_THROW("Currently cannot support matrices with small vector size in std140 layout.");

	// Only vectors or 3-row matrices need to be packed.
	if (mbr_type.vecsize == 1 || (is_matrix(mbr_type) && mbr_type.vecsize != 3))
		return false;

	// Only row-major matrices need to be packed.
	if (is_matrix(mbr_type) && !has_member_decoration(ib_type.self, index, DecorationRowMajor))
		return false;

	if (is_array(mbr_type))
	{
		// If the member is an array, and the array stride is larger than the type needs, don't pack it.
		// Take multi-dimensional arrays into consideration.
		uint32_t md_elem_cnt = 1;
		size_t last_elem_idx = mbr_type.array.size() - 1;
		for (uint32_t i = 0; i < last_elem_idx; i++)
			md_elem_cnt *= max(to_array_size_literal(mbr_type, i), 1u);

		uint32_t unpacked_array_stride = unpacked_mbr_size * md_elem_cnt;
		uint32_t array_stride = type_struct_member_array_stride(ib_type, index);
		return unpacked_array_stride > array_stride;
	}
	else
	{
		uint32_t mbr_offset_curr = get_member_decoration(ib_type.self, index, DecorationOffset);
		// For vectors, pack if the member's offset doesn't conform to the
		// type's usual alignment. For example, a float3 at offset 4.
		if (!is_matrix(mbr_type) && (mbr_offset_curr % unpacked_mbr_size))
			return true;
		// Pack if there is not enough space between this member and the next.
		// If this is the last member, only pack if it's a row-major matrix.
		if (index < ib_type.member_types.size() - 1)
		{
			uint32_t mbr_offset_next = get_member_decoration(ib_type.self, index + 1, DecorationOffset);
			return unpacked_mbr_size > mbr_offset_next - mbr_offset_curr;
		}
		else
			return is_matrix(mbr_type);
	}
}

// Returns a combination of type ID and member index for use as a hash key.
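// For example, type_id 0x2A with member index 3 produces the 64-bit key 0x0000002A00000003.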
MSLStructMemberKey CompilerMSL::get_struct_member_key(uint32_t type_id, uint32_t index)
{
	MSLStructMemberKey k = type_id;
	k <<= 32;
	k += index;
	return k;
}

void CompilerMSL::emit_store_statement(uint32_t lhs_expression, uint32_t rhs_expression)
{
	if (!has_extended_decoration(lhs_expression, SPIRVCrossDecorationPacked) ||
	    get_extended_decoration(lhs_expression, SPIRVCrossDecorationPackedType) == 0)
	{
		CompilerGLSL::emit_store_statement(lhs_expression, rhs_expression);
	}
	else
	{
		// Special handling when storing to a float[] or float2[] in std140 layout.

		auto &type = get<SPIRType>(get_extended_decoration(lhs_expression, SPIRVCrossDecorationPackedType));
		string lhs = to_dereferenced_expression(lhs_expression);
		string rhs = to_pointer_expression(rhs_expression);

		// Unpack the expression so we can store to it with a float or float2.
		// It's still an l-value, so it's fine. Most other unpacking of expressions turn them into r-values instead.
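		// e.g. (illustrative) a store to a packed float[] element "ssbo.arr[i] = v;" is emitted as
		// "ssbo.arr[i].x = v;", since each element is backed by a padded float4.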
		if (is_scalar(type) && is_array(type))
			lhs = enclose_expression(lhs) + ".x";
		else if (is_vector(type) && type.vecsize == 2 && is_array(type))
			lhs = enclose_expression(lhs) + ".xy";

		if (!optimize_read_modify_write(expression_type(rhs_expression), lhs, rhs))
			statement(lhs, " = ", rhs, ";");
		register_write(lhs_expression);
	}
}

// Converts the format of the current expression from packed to unpacked,
// by wrapping the expression in a constructor of the appropriate type.
string CompilerMSL::unpack_expression_type(string expr_str, const SPIRType &type, uint32_t packed_type_id)
{
	const SPIRType *packed_type = nullptr;
	if (packed_type_id)
		packed_type = &get<SPIRType>(packed_type_id);

	// float[] and float2[] cases are really just padding, so directly swizzle from the backing float4 instead.
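	// e.g. (illustrative) a packed float[] element "buf.arr[i]" unpacks to "buf.arr[i].x",
	// and a float2[] element unpacks to "buf.arr[i].xy".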
	if (packed_type && is_array(*packed_type) && is_scalar(*packed_type))
		return enclose_expression(expr_str) + ".x";
	else if (packed_type && is_array(*packed_type) && is_vector(*packed_type) && packed_type->vecsize == 2)
		return enclose_expression(expr_str) + ".xy";
	else
		return join(type_to_glsl(type), "(", expr_str, ")");
}

// Emits the file header info.
void CompilerMSL::emit_header()
{
	// This particular line can be overridden during compilation, so make it a flag and not a pragma line.
	if (suppress_missing_prototypes)
		statement("#pragma clang diagnostic ignored \"-Wmissing-prototypes\"");
	for (auto &pragma : pragma_lines)
		statement(pragma);

	if (!pragma_lines.empty() || suppress_missing_prototypes)
		statement("");

	statement("#include <metal_stdlib>");
	statement("#include <simd/simd.h>");

	for (auto &header : header_lines)
		statement(header);

	statement("");
	statement("using namespace metal;");
	statement("");

	for (auto &td : typedef_lines)
		statement(td);

	if (!typedef_lines.empty())
		statement("");
}

void CompilerMSL::add_pragma_line(const string &line)
{
	auto rslt = pragma_lines.insert(line);
	if (rslt.second)
		force_recompile();
}

void CompilerMSL::add_typedef_line(const string &line)
{
	auto rslt = typedef_lines.insert(line);
	if (rslt.second)
		force_recompile();
}

// Emits any needed custom function bodies.
void CompilerMSL::emit_custom_functions()
{
	for (uint32_t i = SPVFuncImplArrayCopyMultidimMax; i >= 2; i--)
		if (spv_function_implementations.count(static_cast<SPVFuncImpl>(SPVFuncImplArrayCopyMultidimBase + i)))
			spv_function_implementations.insert(static_cast<SPVFuncImpl>(SPVFuncImplArrayCopyMultidimBase + i - 1));
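	// (Each multi-dimensional array-copy helper appears to delegate to the next lower dimension,
	// so requesting an N-dimensional copy pulls in the whole chain below it.)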

	for (auto &spv_func : spv_function_implementations)
	{
		switch (spv_func)
		{
		case SPVFuncImplMod:
			statement("// Implementation of the GLSL mod() function, which is slightly different than Metal fmod()");
			statement("template<typename Tx, typename Ty>");
			statement("Tx mod(Tx x, Ty y)");
			begin_scope();
			statement("return x - y * floor(x / y);");
|
|
|
|
end_scope();
|
|
|
|
statement("");
|
|
|
|
break;
|
|
|
|
|
2017-05-19 22:14:08 +00:00
|
|
|
case SPVFuncImplRadians:
|
|
|
|
statement("// Implementation of the GLSL radians() function");
|
|
|
|
statement("template<typename T>");
|
|
|
|
statement("T radians(T d)");
|
|
|
|
begin_scope();
|
2018-03-07 09:24:21 +00:00
|
|
|
statement("return d * T(0.01745329251);");
|
2017-05-19 22:14:08 +00:00
|
|
|
end_scope();
|
|
|
|
statement("");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case SPVFuncImplDegrees:
|
|
|
|
statement("// Implementation of the GLSL degrees() function");
|
|
|
|
statement("template<typename T>");
|
|
|
|
statement("T degrees(T r)");
|
|
|
|
begin_scope();
|
2018-03-07 09:24:21 +00:00
|
|
|
statement("return r * T(57.2957795131);");
|
2017-05-19 22:14:08 +00:00
|
|
|
end_scope();
|
|
|
|
statement("");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case SPVFuncImplFindILsb:
|
|
|
|
statement("// Implementation of the GLSL findLSB() function");
|
|
|
|
statement("template<typename T>");
|
|
|
|
statement("T findLSB(T x)");
|
|
|
|
begin_scope();
|
2017-11-06 02:34:42 +00:00
|
|
|
statement("return select(ctz(x), T(-1), x == T(0));");
|
2017-05-19 22:14:08 +00:00
|
|
|
end_scope();
|
|
|
|
statement("");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case SPVFuncImplFindUMsb:
|
|
|
|
statement("// Implementation of the unsigned GLSL findMSB() function");
|
|
|
|
statement("template<typename T>");
|
|
|
|
statement("T findUMSB(T x)");
|
|
|
|
begin_scope();
|
2017-11-06 02:34:42 +00:00
|
|
|
statement("return select(clz(T(0)) - (clz(x) + T(1)), T(-1), x == T(0));");
|
2017-05-19 22:14:08 +00:00
|
|
|
end_scope();
|
|
|
|
statement("");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case SPVFuncImplFindSMsb:
|
|
|
|
statement("// Implementation of the signed GLSL findMSB() function");
|
|
|
|
statement("template<typename T>");
|
|
|
|
statement("T findSMSB(T x)");
|
|
|
|
begin_scope();
|
2017-11-06 02:34:42 +00:00
|
|
|
statement("T v = select(x, T(-1) - x, x < T(0));");
|
|
|
|
statement("return select(clz(T(0)) - (clz(v) + T(1)), T(-1), v == T(0));");
|
|
|
|
end_scope();
|
|
|
|
statement("");
|
|
|
|
break;
|
|
|
|
|
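		// Worked example for the helpers above (illustrative): for x = 8u (binary 1000),
		// findLSB(x) == ctz(8) == 3, and findUMSB(x) == 32 - (clz(8) + 1) == 3 for a
		// 32-bit T, since Metal defines clz(T(0)) as the bit width of T. Zero inputs
		// return T(-1) in both cases, matching GLSL's findLSB()/findMSB().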
		case SPVFuncImplSSign:
			statement("// Implementation of the GLSL sign() function for integer types");
			statement("template<typename T, typename E = typename enable_if<is_integral<T>::value>::type>");
			statement("T sign(T x)");
			begin_scope();
			statement("return select(select(select(x, T(0), x == T(0)), T(1), x > T(0)), T(-1), x < T(0));");
			end_scope();
			statement("");
			break;

		case SPVFuncImplArrayCopy:
			statement("// Implementation of an array copy function to cover GLSL's ability to copy an array via "
			          "assignment.");
			statement("template<typename T, uint N>");
			statement("void spvArrayCopyFromStack1(thread T (&dst)[N], thread const T (&src)[N])");
			begin_scope();
			statement("for (uint i = 0; i < N; dst[i] = src[i], i++);");
			end_scope();
			statement("");

			statement("template<typename T, uint N>");
			statement("void spvArrayCopyFromConstant1(thread T (&dst)[N], constant T (&src)[N])");
			begin_scope();
			statement("for (uint i = 0; i < N; dst[i] = src[i], i++);");
			end_scope();
			statement("");
			break;
		case SPVFuncImplArrayOfArrayCopy2Dim:
		case SPVFuncImplArrayOfArrayCopy3Dim:
		case SPVFuncImplArrayOfArrayCopy4Dim:
		case SPVFuncImplArrayOfArrayCopy5Dim:
		case SPVFuncImplArrayOfArrayCopy6Dim:
		{
			static const char *function_name_tags[] = {
				"FromStack",
				"FromConstant",
			};

			static const char *src_address_space[] = {
				"thread const",
				"constant",
			};

			for (uint32_t variant = 0; variant < 2; variant++)
			{
				uint32_t dimensions = spv_func - SPVFuncImplArrayCopyMultidimBase;
				string tmp = "template<typename T";
				for (uint8_t i = 0; i < dimensions; i++)
				{
					tmp += ", uint ";
					tmp += 'A' + i;
				}
				tmp += ">";
				statement(tmp);

				string array_arg;
				for (uint8_t i = 0; i < dimensions; i++)
				{
					array_arg += "[";
					array_arg += 'A' + i;
					array_arg += "]";
				}

				statement("void spvArrayCopy", function_name_tags[variant], dimensions, "(thread T (&dst)", array_arg,
				          ", ", src_address_space[variant], " T (&src)", array_arg, ")");

				begin_scope();
				statement("for (uint i = 0; i < A; i++)");
				begin_scope();
				statement("spvArrayCopy", function_name_tags[variant], dimensions - 1, "(dst[i], src[i]);");
				end_scope();
				end_scope();
				statement("");
			}
			break;
		}
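		// For the 2-dimensional "FromStack" variant, the code above generates MSL
		// roughly like this (an illustrative sketch of the emitted helper):
		//
		//     template<typename T, uint A, uint B>
		//     void spvArrayCopyFromStack2(thread T (&dst)[A][B], thread const T (&src)[A][B])
		//     {
		//         for (uint i = 0; i < A; i++)
		//         {
		//             spvArrayCopyFromStack1(dst[i], src[i]);
		//         }
		//     }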
		case SPVFuncImplTexelBufferCoords:
		{
			string tex_width_str = convert_to_string(msl_options.texel_buffer_texture_width);
			statement("// Returns 2D texture coords corresponding to 1D texel buffer coords");
			statement("uint2 spvTexelBufferCoord(uint tc)");
			begin_scope();
			statement(join("return uint2(tc % ", tex_width_str, ", tc / ", tex_width_str, ");"));
			end_scope();
			statement("");
			break;
		}
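		// Worked example: with the default texel buffer texture width of 4096,
		// spvTexelBufferCoord(8193) yields uint2(8193 % 4096, 8193 / 4096) == uint2(1, 2),
		// i.e. the 1D texel buffer is tiled row by row across the backing 2D texture.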
		case SPVFuncImplInverse4x4:
			statement("// Returns the determinant of a 2x2 matrix.");
			statement("inline float spvDet2x2(float a1, float a2, float b1, float b2)");
			begin_scope();
			statement("return a1 * b2 - b1 * a2;");
			end_scope();
			statement("");

			statement("// Returns the determinant of a 3x3 matrix.");
			statement("inline float spvDet3x3(float a1, float a2, float a3, float b1, float b2, float b3, float c1, "
			          "float c2, float c3)");
			begin_scope();
			statement("return a1 * spvDet2x2(b2, b3, c2, c3) - b1 * spvDet2x2(a2, a3, c2, c3) + c1 * spvDet2x2(a2, a3, "
			          "b2, b3);");
			end_scope();
			statement("");
			statement("// Returns the inverse of a matrix, by using the algorithm of calculating the classical");
			statement("// adjoint and dividing by the determinant. The contents of the matrix are changed.");
			statement("float4x4 spvInverse4x4(float4x4 m)");
			begin_scope();
			statement("float4x4 adj; // The adjoint matrix (inverse after dividing by determinant)");
			statement_no_indent("");
			statement("// Create the transpose of the cofactors, as the classical adjoint of the matrix.");
			statement("adj[0][0] = spvDet3x3(m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], "
			          "m[3][3]);");
			statement("adj[0][1] = -spvDet3x3(m[0][1], m[0][2], m[0][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], "
			          "m[3][3]);");
			statement("adj[0][2] = spvDet3x3(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[3][1], m[3][2], "
			          "m[3][3]);");
			statement("adj[0][3] = -spvDet3x3(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], "
			          "m[2][3]);");
			statement_no_indent("");
			statement("adj[1][0] = -spvDet3x3(m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], "
			          "m[3][3]);");
			statement("adj[1][1] = spvDet3x3(m[0][0], m[0][2], m[0][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], "
			          "m[3][3]);");
			statement("adj[1][2] = -spvDet3x3(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[3][0], m[3][2], "
			          "m[3][3]);");
			statement("adj[1][3] = spvDet3x3(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], "
			          "m[2][3]);");
			statement_no_indent("");
			statement("adj[2][0] = spvDet3x3(m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], "
			          "m[3][3]);");
			statement("adj[2][1] = -spvDet3x3(m[0][0], m[0][1], m[0][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], "
			          "m[3][3]);");
			statement("adj[2][2] = spvDet3x3(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[3][0], m[3][1], "
			          "m[3][3]);");
			statement("adj[2][3] = -spvDet3x3(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], "
			          "m[2][3]);");
			statement_no_indent("");
			statement("adj[3][0] = -spvDet3x3(m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], "
			          "m[3][2]);");
			statement("adj[3][1] = spvDet3x3(m[0][0], m[0][1], m[0][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], "
			          "m[3][2]);");
			statement("adj[3][2] = -spvDet3x3(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[3][0], m[3][1], "
			          "m[3][2]);");
			statement("adj[3][3] = spvDet3x3(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], "
			          "m[2][2]);");
			statement_no_indent("");
			statement("// Calculate the determinant as a combination of the cofactors of the first row.");
			statement("float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]) + (adj[0][2] * m[2][0]) + (adj[0][3] "
			          "* m[3][0]);");
			statement_no_indent("");
			statement("// Divide the classical adjoint matrix by the determinant.");
			statement("// If determinant is zero, matrix is not invertible, so leave it unchanged.");
			statement("return (det != 0.0f) ? (adj * (1.0f / det)) : m;");
			end_scope();
			statement("");
			break;

		case SPVFuncImplInverse3x3:
			if (spv_function_implementations.count(SPVFuncImplInverse4x4) == 0)
			{
				statement("// Returns the determinant of a 2x2 matrix.");
				statement("inline float spvDet2x2(float a1, float a2, float b1, float b2)");
				begin_scope();
				statement("return a1 * b2 - b1 * a2;");
				end_scope();
				statement("");
			}

			statement("// Returns the inverse of a matrix, by using the algorithm of calculating the classical");
			statement("// adjoint and dividing by the determinant. The contents of the matrix are changed.");
			statement("float3x3 spvInverse3x3(float3x3 m)");
			begin_scope();
			statement("float3x3 adj; // The adjoint matrix (inverse after dividing by determinant)");
			statement_no_indent("");
			statement("// Create the transpose of the cofactors, as the classical adjoint of the matrix.");
			statement("adj[0][0] = spvDet2x2(m[1][1], m[1][2], m[2][1], m[2][2]);");
			statement("adj[0][1] = -spvDet2x2(m[0][1], m[0][2], m[2][1], m[2][2]);");
			statement("adj[0][2] = spvDet2x2(m[0][1], m[0][2], m[1][1], m[1][2]);");
			statement_no_indent("");
			statement("adj[1][0] = -spvDet2x2(m[1][0], m[1][2], m[2][0], m[2][2]);");
			statement("adj[1][1] = spvDet2x2(m[0][0], m[0][2], m[2][0], m[2][2]);");
			statement("adj[1][2] = -spvDet2x2(m[0][0], m[0][2], m[1][0], m[1][2]);");
			statement_no_indent("");
			statement("adj[2][0] = spvDet2x2(m[1][0], m[1][1], m[2][0], m[2][1]);");
			statement("adj[2][1] = -spvDet2x2(m[0][0], m[0][1], m[2][0], m[2][1]);");
			statement("adj[2][2] = spvDet2x2(m[0][0], m[0][1], m[1][0], m[1][1]);");
			statement_no_indent("");
			statement("// Calculate the determinant as a combination of the cofactors of the first row.");
			statement("float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]) + (adj[0][2] * m[2][0]);");
			statement_no_indent("");
			statement("// Divide the classical adjoint matrix by the determinant.");
			statement("// If determinant is zero, matrix is not invertible, so leave it unchanged.");
			statement("return (det != 0.0f) ? (adj * (1.0f / det)) : m;");
			end_scope();
			statement("");
			break;

		case SPVFuncImplInverse2x2:
			statement("// Returns the inverse of a matrix, by using the algorithm of calculating the classical");
			statement("// adjoint and dividing by the determinant. The contents of the matrix are changed.");
			statement("float2x2 spvInverse2x2(float2x2 m)");
			begin_scope();
			statement("float2x2 adj; // The adjoint matrix (inverse after dividing by determinant)");
			statement_no_indent("");
			statement("// Create the transpose of the cofactors, as the classical adjoint of the matrix.");
			statement("adj[0][0] = m[1][1];");
			statement("adj[0][1] = -m[0][1];");
			statement_no_indent("");
			statement("adj[1][0] = -m[1][0];");
			statement("adj[1][1] = m[0][0];");
			statement_no_indent("");
			statement("// Calculate the determinant as a combination of the cofactors of the first row.");
			statement("float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]);");
			statement_no_indent("");
			statement("// Divide the classical adjoint matrix by the determinant.");
			statement("// If determinant is zero, matrix is not invertible, so leave it unchanged.");
			statement("return (det != 0.0f) ? (adj * (1.0f / det)) : m;");
			end_scope();
			statement("");
			break;
		case SPVFuncImplRowMajor2x3:
			statement("// Implementation of a conversion of matrix content from RowMajor to ColumnMajor organization.");
			statement("float2x3 spvConvertFromRowMajor2x3(float2x3 m)");
			begin_scope();
			statement("return float2x3(float3(m[0][0], m[0][2], m[1][1]), float3(m[0][1], m[1][0], m[1][2]));");
			end_scope();
			statement("");
			break;

		case SPVFuncImplRowMajor2x4:
			statement("// Implementation of a conversion of matrix content from RowMajor to ColumnMajor organization.");
			statement("float2x4 spvConvertFromRowMajor2x4(float2x4 m)");
			begin_scope();
			statement("return float2x4(float4(m[0][0], m[0][2], m[1][0], m[1][2]), float4(m[0][1], m[0][3], m[1][1], "
			          "m[1][3]));");
			end_scope();
			statement("");
			break;

		case SPVFuncImplRowMajor3x2:
			statement("// Implementation of a conversion of matrix content from RowMajor to ColumnMajor organization.");
			statement("float3x2 spvConvertFromRowMajor3x2(float3x2 m)");
			begin_scope();
			statement("return float3x2(float2(m[0][0], m[1][1]), float2(m[0][1], m[2][0]), float2(m[1][0], m[2][1]));");
			end_scope();
			statement("");
			break;

		case SPVFuncImplRowMajor3x4:
			statement("// Implementation of a conversion of matrix content from RowMajor to ColumnMajor organization.");
			statement("float3x4 spvConvertFromRowMajor3x4(float3x4 m)");
			begin_scope();
			statement("return float3x4(float4(m[0][0], m[0][3], m[1][2], m[2][1]), float4(m[0][1], m[1][0], m[1][3], "
			          "m[2][2]), float4(m[0][2], m[1][1], m[2][0], m[2][3]));");
			end_scope();
			statement("");
			break;

		case SPVFuncImplRowMajor4x2:
			statement("// Implementation of a conversion of matrix content from RowMajor to ColumnMajor organization.");
			statement("float4x2 spvConvertFromRowMajor4x2(float4x2 m)");
			begin_scope();
			statement("return float4x2(float2(m[0][0], m[2][0]), float2(m[0][1], m[2][1]), float2(m[1][0], m[3][0]), "
			          "float2(m[1][1], m[3][1]));");
			end_scope();
			statement("");
			break;

		case SPVFuncImplRowMajor4x3:
			statement("// Implementation of a conversion of matrix content from RowMajor to ColumnMajor organization.");
			statement("float4x3 spvConvertFromRowMajor4x3(float4x3 m)");
			begin_scope();
			statement("return float4x3(float3(m[0][0], m[1][1], m[2][2]), float3(m[0][1], m[1][2], m[3][0]), "
			          "float3(m[0][2], m[2][0], m[3][1]), float3(m[1][0], m[2][1], m[3][2]));");
			end_scope();
			statement("");
			break;
		case SPVFuncImplTextureSwizzle:
			statement("enum class spvSwizzle : uint");
			begin_scope();
			statement("none = 0,");
			statement("zero,");
			statement("one,");
			statement("red,");
			statement("green,");
			statement("blue,");
			statement("alpha");
			end_scope_decl();
			statement("");
			statement("template<typename T> struct spvRemoveReference { typedef T type; };");
			statement("template<typename T> struct spvRemoveReference<thread T&> { typedef T type; };");
			statement("template<typename T> struct spvRemoveReference<thread T&&> { typedef T type; };");
			statement("template<typename T> inline constexpr thread T&& spvForward(thread typename "
			          "spvRemoveReference<T>::type& x)");
			begin_scope();
			statement("return static_cast<thread T&&>(x);");
			end_scope();
			statement("template<typename T> inline constexpr thread T&& spvForward(thread typename "
			          "spvRemoveReference<T>::type&& x)");
			begin_scope();
			statement("return static_cast<thread T&&>(x);");
			end_scope();
			statement("");
			statement("template<typename T>");
			statement("inline T spvGetSwizzle(vec<T, 4> x, T c, spvSwizzle s)");
			begin_scope();
			statement("switch (s)");
			begin_scope();
			statement("case spvSwizzle::none:");
			statement("    return c;");
			statement("case spvSwizzle::zero:");
			statement("    return 0;");
			statement("case spvSwizzle::one:");
			statement("    return 1;");
			statement("case spvSwizzle::red:");
			statement("    return x.r;");
			statement("case spvSwizzle::green:");
			statement("    return x.g;");
			statement("case spvSwizzle::blue:");
			statement("    return x.b;");
			statement("case spvSwizzle::alpha:");
			statement("    return x.a;");
			end_scope();
			end_scope();
			statement("");
			statement("// Wrapper function that swizzles texture samples and fetches.");
			statement("template<typename T>");
			statement("inline vec<T, 4> spvTextureSwizzle(vec<T, 4> x, uint s)");
			begin_scope();
			statement("if (!s)");
			statement("    return x;");
			statement("return vec<T, 4>(spvGetSwizzle(x, x.r, spvSwizzle((s >> 0) & 0xFF)), "
			          "spvGetSwizzle(x, x.g, spvSwizzle((s >> 8) & 0xFF)), spvGetSwizzle(x, x.b, spvSwizzle((s >> 16) "
			          "& 0xFF)), "
			          "spvGetSwizzle(x, x.a, spvSwizzle((s >> 24) & 0xFF)));");
			end_scope();
			statement("");
			statement("template<typename T>");
			statement("inline T spvTextureSwizzle(T x, uint s)");
			begin_scope();
			statement("return spvTextureSwizzle(vec<T, 4>(x, 0, 0, 1), s).x;");
			end_scope();
			statement("");
			statement("// Wrapper function that swizzles texture gathers.");
			statement("template<typename T, typename Tex, typename... Ts>");
			statement(
			    "inline vec<T, 4> spvGatherSwizzle(sampler s, const thread Tex& t, Ts... params, component c, uint sw) "
			    "METAL_CONST_ARG(c)");
			begin_scope();
			statement("if (sw)");
			begin_scope();
			statement("switch (spvSwizzle((sw >> (uint(c) * 8)) & 0xFF))");
			begin_scope();
			statement("case spvSwizzle::none:");
			statement("    break;");
			statement("case spvSwizzle::zero:");
			statement("    return vec<T, 4>(0, 0, 0, 0);");
			statement("case spvSwizzle::one:");
			statement("    return vec<T, 4>(1, 1, 1, 1);");
			statement("case spvSwizzle::red:");
			statement("    return t.gather(s, spvForward<Ts>(params)..., component::x);");
			statement("case spvSwizzle::green:");
			statement("    return t.gather(s, spvForward<Ts>(params)..., component::y);");
			statement("case spvSwizzle::blue:");
			statement("    return t.gather(s, spvForward<Ts>(params)..., component::z);");
			statement("case spvSwizzle::alpha:");
			statement("    return t.gather(s, spvForward<Ts>(params)..., component::w);");
			end_scope();
			end_scope();
			// texture::gather insists on its component parameter being a constant
			// expression, so we need this silly workaround just to compile the shader.
			statement("switch (c)");
			begin_scope();
			statement("case component::x:");
			statement("    return t.gather(s, spvForward<Ts>(params)..., component::x);");
			statement("case component::y:");
			statement("    return t.gather(s, spvForward<Ts>(params)..., component::y);");
			statement("case component::z:");
			statement("    return t.gather(s, spvForward<Ts>(params)..., component::z);");
			statement("case component::w:");
			statement("    return t.gather(s, spvForward<Ts>(params)..., component::w);");
			end_scope();
			end_scope();
			statement("");
			statement("// Wrapper function that swizzles depth texture gathers.");
			statement("template<typename T, typename Tex, typename... Ts>");
			statement(
			    "inline vec<T, 4> spvGatherCompareSwizzle(sampler s, const thread Tex& t, Ts... params, uint sw) ");
			begin_scope();
			statement("if (sw)");
			begin_scope();
			statement("switch (spvSwizzle(sw & 0xFF))");
			begin_scope();
			statement("case spvSwizzle::none:");
			statement("case spvSwizzle::red:");
			statement("    break;");
			statement("case spvSwizzle::zero:");
			statement("case spvSwizzle::green:");
			statement("case spvSwizzle::blue:");
			statement("case spvSwizzle::alpha:");
			statement("    return vec<T, 4>(0, 0, 0, 0);");
			statement("case spvSwizzle::one:");
			statement("    return vec<T, 4>(1, 1, 1, 1);");
			end_scope();
			end_scope();
			statement("return t.gather_compare(s, spvForward<Ts>(params)...);");
			end_scope();
			statement("");
			break;
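		// The swizzle constant packs one spvSwizzle value per component, 8 bits each:
		// bits 0-7 select the R output, 8-15 G, 16-23 B, and 24-31 A. For example, a
		// constant whose low byte is spvSwizzle::blue makes the red output channel
		// return x.b, while a constant of 0 means "identity" and is skipped early.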
		case SPVFuncImplSubgroupBallot:
			statement("inline uint4 spvSubgroupBallot(bool value)");
			begin_scope();
			statement("simd_vote vote = simd_ballot(value);");
			statement("// simd_ballot() returns a 64-bit integer-like object, but");
			statement("// SPIR-V callers expect a uint4. We must convert.");
			statement("// FIXME: This won't include higher bits if Apple ever supports");
			statement("// 128 lanes in a SIMD-group.");
			statement("return uint4((uint)((simd_vote::vote_t)vote & 0xFFFFFFFF), (uint)(((simd_vote::vote_t)vote >> "
			          "32) & 0xFFFFFFFF), 0, 0);");
			end_scope();
			statement("");
			break;

		case SPVFuncImplSubgroupBallotBitExtract:
			statement("inline bool spvSubgroupBallotBitExtract(uint4 ballot, uint bit)");
			begin_scope();
			statement("return !!extract_bits(ballot[bit / 32], bit % 32, 1);");
			end_scope();
			statement("");
			break;

		case SPVFuncImplSubgroupBallotFindLSB:
			statement("inline uint spvSubgroupBallotFindLSB(uint4 ballot)");
			begin_scope();
			statement("return select(ctz(ballot.x), select(32 + ctz(ballot.y), select(64 + ctz(ballot.z), select(96 + "
			          "ctz(ballot.w), uint(-1), ballot.w == 0), ballot.z == 0), ballot.y == 0), ballot.x == 0);");
			end_scope();
			statement("");
			break;

		case SPVFuncImplSubgroupBallotFindMSB:
			statement("inline uint spvSubgroupBallotFindMSB(uint4 ballot)");
			begin_scope();
			statement("return select(128 - (clz(ballot.w) + 1), select(96 - (clz(ballot.z) + 1), select(64 - "
			          "(clz(ballot.y) + 1), select(32 - (clz(ballot.x) + 1), uint(-1), ballot.x == 0), ballot.y == 0), "
			          "ballot.z == 0), ballot.w == 0);");
			end_scope();
			statement("");
			break;

		case SPVFuncImplSubgroupBallotBitCount:
			statement("inline uint spvSubgroupBallotBitCount(uint4 ballot)");
			begin_scope();
			statement("return popcount(ballot.x) + popcount(ballot.y) + popcount(ballot.z) + popcount(ballot.w);");
			end_scope();
			statement("");
			statement("inline uint spvSubgroupBallotInclusiveBitCount(uint4 ballot, uint gl_SubgroupInvocationID)");
			begin_scope();
			statement("uint4 mask = uint4(extract_bits(0xFFFFFFFF, 0, min(gl_SubgroupInvocationID + 1, 32u)), "
			          "extract_bits(0xFFFFFFFF, 0, (uint)max((int)gl_SubgroupInvocationID + 1 - 32, 0)), "
			          "uint2(0));");
			statement("return spvSubgroupBallotBitCount(ballot & mask);");
			end_scope();
			statement("");
			statement("inline uint spvSubgroupBallotExclusiveBitCount(uint4 ballot, uint gl_SubgroupInvocationID)");
			begin_scope();
			statement("uint4 mask = uint4(extract_bits(0xFFFFFFFF, 0, min(gl_SubgroupInvocationID, 32u)), "
			          "extract_bits(0xFFFFFFFF, 0, (uint)max((int)gl_SubgroupInvocationID - 32, 0)), uint2(0));");
			statement("return spvSubgroupBallotBitCount(ballot & mask);");
			end_scope();
			statement("");
			break;
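		// Worked example (illustrative): for ballot = uint4(0x0000000F, 0, 0, 0),
		// spvSubgroupBallotBitCount returns 4, spvSubgroupBallotFindLSB returns 0,
		// and spvSubgroupBallotFindMSB returns 32 - (clz(0xF) + 1) == 3. With
		// gl_SubgroupInvocationID == 1, the inclusive count masks with 0b11 and
		// yields 2, while the exclusive count masks with 0b1 and yields 1.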
		case SPVFuncImplSubgroupAllEqual:
			// Metal doesn't provide a function to evaluate this directly. But, we can
			// implement this by comparing every thread's value to one thread's value
			// (in this case, the value of the first active thread). Then, by the transitive
			// property of equality, if all comparisons return true, then they are all equal.
			statement("template<typename T>");
			statement("inline bool spvSubgroupAllEqual(T value)");
			begin_scope();
			statement("return simd_all(value == simd_broadcast_first(value));");
			end_scope();
			statement("");
			statement("template<>");
			statement("inline bool spvSubgroupAllEqual(bool value)");
			begin_scope();
			statement("return simd_all(value) || !simd_any(value);");
			end_scope();
			statement("");
			break;

		default:
			break;
		}
	}
}
// Undefined global memory is not allowed in MSL.
// Declare constant and init to zeros. Use {}, as global constructors can break Metal.
void CompilerMSL::declare_undefined_values()
{
	bool emitted = false;
	ir.for_each_typed_id<SPIRUndef>([&](uint32_t, SPIRUndef &undef) {
		auto &type = this->get<SPIRType>(undef.basetype);
		statement("constant ", variable_decl(type, to_name(undef.self), undef.self), " = {};");
		emitted = true;
	});

	if (emitted)
		statement("");
}
void CompilerMSL::declare_constant_arrays()
{
	// MSL cannot declare arrays inline (except when declaring a variable), so we must move them out to
	// global constants directly, so we are able to use constants as variable expressions.
	bool emitted = false;

	ir.for_each_typed_id<SPIRConstant>([&](uint32_t, SPIRConstant &c) {
		if (c.specialization)
			return;

		auto &type = this->get<SPIRType>(c.constant_type);
		if (!type.array.empty())
		{
			auto name = to_name(c.self);
			statement("constant ", variable_decl(type, name), " = ", constant_expression(c), ";");
			emitted = true;
		}
	});

	if (emitted)
		statement("");
}
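// Illustrative output for a constant float[3] literal (the "_42" identifier is a
// hypothetical SPIRV-Cross default name; the exact spelling depends on the module):
//
//     constant float _42[3] = { 1.0, 2.0, 3.0 };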
void CompilerMSL::emit_resources()
{
	declare_constant_arrays();
	declare_undefined_values();

	// Emit the special [[stage_in]] and [[stage_out]] interface blocks which we created.
	emit_interface_block(stage_out_var_id);
	emit_interface_block(patch_stage_out_var_id);
	emit_interface_block(stage_in_var_id);
	emit_interface_block(patch_stage_in_var_id);
}
// Emit declarations for the specialization Metal function constants
void CompilerMSL::emit_specialization_constants_and_structs()
{
	SpecializationConstant wg_x, wg_y, wg_z;
	uint32_t workgroup_size_id = get_work_group_size_specialization_constants(wg_x, wg_y, wg_z);
	bool emitted = false;

	unordered_set<uint32_t> declared_structs;

	for (auto &id_ : ir.ids_for_constant_or_type)
	{
		auto &id = ir.ids[id_];

		if (id.get_type() == TypeConstant)
		{
			auto &c = id.get<SPIRConstant>();

			if (c.self == workgroup_size_id)
			{
				// TODO: This can be expressed as a [[threads_per_threadgroup]] input semantic, but we need to know
				// the work group size at compile time in SPIR-V, and [[threads_per_threadgroup]] would need to be passed around as a global.
				// The work group size may be a specialization constant.
				statement("constant uint3 ", builtin_to_glsl(BuiltInWorkgroupSize, StorageClassWorkgroup),
				          " [[maybe_unused]] = ", constant_expression(get<SPIRConstant>(workgroup_size_id)), ";");
				emitted = true;
			}
			else if (c.specialization)
			{
				auto &type = get<SPIRType>(c.constant_type);
				string sc_type_name = type_to_glsl(type);
				string sc_name = to_name(c.self);
				string sc_tmp_name = sc_name + "_tmp";

				// Function constants are only supported in MSL 1.2 and later.
				// If we don't support it just declare the "default" directly.
				// This "default" value can be overridden to the true specialization constant by the API user.
				// Specialization constants which are used as array length expressions cannot be function constants in MSL,
				// so just fall back to macros.
				if (msl_options.supports_msl_version(1, 2) && has_decoration(c.self, DecorationSpecId) &&
				    !c.is_used_as_array_length)
				{
					uint32_t constant_id = get_decoration(c.self, DecorationSpecId);
					// Only scalar, non-composite values can be function constants.
					statement("constant ", sc_type_name, " ", sc_tmp_name, " [[function_constant(", constant_id,
					          ")]];");
					statement("constant ", sc_type_name, " ", sc_name, " = is_function_constant_defined(", sc_tmp_name,
					          ") ? ", sc_tmp_name, " : ", constant_expression(c), ";");
				}
				else if (has_decoration(c.self, DecorationSpecId))
				{
					// Fallback to macro overrides.
					c.specialization_constant_macro_name =
					    constant_value_macro_name(get_decoration(c.self, DecorationSpecId));

					statement("#ifndef ", c.specialization_constant_macro_name);
					statement("#define ", c.specialization_constant_macro_name, " ", constant_expression(c));
					statement("#endif");
					statement("constant ", sc_type_name, " ", sc_name, " = ", c.specialization_constant_macro_name,
					          ";");
				}
				else
				{
					// Composite specialization constants must be built from other specialization constants.
					statement("constant ", sc_type_name, " ", sc_name, " = ", constant_expression(c), ";");
				}
				emitted = true;
			}
		}
		else if (id.get_type() == TypeConstantOp)
		{
			auto &c = id.get<SPIRConstantOp>();
			auto &type = get<SPIRType>(c.basetype);
			auto name = to_name(c.self);
			statement("constant ", variable_decl(type, name), " = ", constant_op_expression(c), ";");
			emitted = true;
		}
		else if (id.get_type() == TypeType)
		{
			// Output non-builtin interface structs. These include local function structs
			// and structs nested within uniform and read-write buffers.
			auto &type = id.get<SPIRType>();
			uint32_t type_id = type.self;

			bool is_struct = (type.basetype == SPIRType::Struct) && type.array.empty();
			bool is_block =
			    has_decoration(type.self, DecorationBlock) || has_decoration(type.self, DecorationBufferBlock);

			bool is_builtin_block = is_block && is_builtin_type(type);
			bool is_declarable_struct = is_struct && !is_builtin_block;

			// We'll declare this later.
			if (stage_out_var_id && get_stage_out_struct_type().self == type_id)
				is_declarable_struct = false;
			if (patch_stage_out_var_id && get_patch_stage_out_struct_type().self == type_id)
				is_declarable_struct = false;
			if (stage_in_var_id && get_stage_in_struct_type().self == type_id)
				is_declarable_struct = false;
			if (patch_stage_in_var_id && get_patch_stage_in_struct_type().self == type_id)
				is_declarable_struct = false;

			// Align and emit declarable structs...but avoid declaring each more than once.
			if (is_declarable_struct && declared_structs.count(type_id) == 0)
			{
				if (emitted)
					statement("");
				emitted = false;

				declared_structs.insert(type_id);

				if (has_extended_decoration(type_id, SPIRVCrossDecorationPacked))
					align_struct(type);

				// Make sure we declare the underlying struct type, and not the "decorated" type with pointers, etc.
				emit_struct(get<SPIRType>(type_id));
			}
		}
	}

	if (emitted)
		statement("");
}
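// Illustrative MSL emitted for a scalar spec constant with SpecId 3 and default
// value 1.0 when targeting MSL 1.2+ (identifier names depend on the module):
//
//     constant float _10_tmp [[function_constant(3)]];
//     constant float _10 = is_function_constant_defined(_10_tmp) ? _10_tmp : 1.0;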
void CompilerMSL::emit_binary_unord_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1,
                                       const char *op)
{
	bool forward = should_forward(op0) && should_forward(op1);
	emit_op(result_type, result_id,
	        join("(isunordered(", to_enclosed_unpacked_expression(op0), ", ", to_enclosed_unpacked_expression(op1),
	             ") || ", to_enclosed_unpacked_expression(op0), " ", op, " ", to_enclosed_unpacked_expression(op1),
	             ")"),
	        forward);

	inherit_expression_dependencies(result_id, op0);
	inherit_expression_dependencies(result_id, op1);
}
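// SPIR-V's unordered comparisons are true when either operand is NaN, which MSL's
// ordered operators are not, hence the isunordered() term. For example, OpFUnordGreaterThan
// on a and b expands to (an illustrative expansion): (isunordered(a, b) || a > b).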
bool CompilerMSL::emit_tessellation_access_chain(const uint32_t *ops, uint32_t length)
{
	// If this is a per-vertex output, remap it to the I/O array buffer.
	auto *var = maybe_get<SPIRVariable>(ops[2]);
	BuiltIn bi_type = BuiltIn(get_decoration(ops[2], DecorationBuiltIn));
	if (var &&
	    (var->storage == StorageClassInput ||
	     (get_execution_model() == ExecutionModelTessellationControl && var->storage == StorageClassOutput)) &&
	    !(has_decoration(ops[2], DecorationPatch) || is_patch_block(get_variable_data_type(*var))) &&
	    (!is_builtin_variable(*var) || bi_type == BuiltInPosition || bi_type == BuiltInPointSize ||
	     bi_type == BuiltInClipDistance || bi_type == BuiltInCullDistance ||
	     get_variable_data_type(*var).basetype == SPIRType::Struct))
	{
		AccessChainMeta meta;
		SmallVector<uint32_t> indices;
		uint32_t next_id = ir.increase_bound_by(2);

		indices.reserve(length - 3 + 1);
		uint32_t type_id = next_id++;
		SPIRType new_uint_type;
		new_uint_type.basetype = SPIRType::UInt;
		new_uint_type.width = 32;
		set<SPIRType>(type_id, new_uint_type);

		indices.push_back(ops[3]);

		uint32_t const_mbr_id = next_id++;
		uint32_t index = get_extended_decoration(ops[2], SPIRVCrossDecorationInterfaceMemberIndex);
		uint32_t ptr = var->storage == StorageClassInput ? stage_in_ptr_var_id : stage_out_ptr_var_id;
		if (var->storage == StorageClassInput || has_decoration(get_variable_element_type(*var).self, DecorationBlock))
		{
			uint32_t i = 4;
			auto *type = &get_variable_element_type(*var);
			if (index == uint32_t(-1) && length >= 5)
			{
				// Maybe this is a struct type in the input class, in which case
				// we put it as a decoration on the corresponding member.
				index = get_extended_member_decoration(ops[2], get_constant(ops[4]).scalar(),
				                                       SPIRVCrossDecorationInterfaceMemberIndex);
				assert(index != uint32_t(-1));
				i++;
				type = &get<SPIRType>(type->member_types[get_constant(ops[4]).scalar()]);
			}
			// In this case, we flattened structures and arrays, so now we have to
			// combine the following indices. If we encounter a non-constant index,
			// we're hosed.
			for (; i < length; ++i)
			{
				if (!is_array(*type) && !is_matrix(*type) && type->basetype != SPIRType::Struct)
					break;

				auto &c = get_constant(ops[i]);
				index += c.scalar();
				if (type->parent_type)
					type = &get<SPIRType>(type->parent_type);
				else if (type->basetype == SPIRType::Struct)
					type = &get<SPIRType>(type->member_types[c.scalar()]);
			}
			// If the access chain terminates at a composite type, the composite
			// itself might be copied. In that case, we must unflatten it.
			if (is_matrix(*type) || is_array(*type) || type->basetype == SPIRType::Struct)
			{
				std::string temp_name = join(to_name(var->self), "_", ops[1]);
				statement(variable_decl(*type, temp_name, var->self), ";");
				// Set up the initializer for this temporary variable.
				indices.push_back(const_mbr_id);
				if (type->basetype == SPIRType::Struct)
				{
					for (uint32_t j = 0; j < type->member_types.size(); j++)
					{
						index = get_extended_member_decoration(ops[2], j, SPIRVCrossDecorationInterfaceMemberIndex);
						const auto &mbr_type = get<SPIRType>(type->member_types[j]);
						if (is_matrix(mbr_type))
						{
							for (uint32_t k = 0; k < mbr_type.columns; k++, index++)
							{
								set<SPIRConstant>(const_mbr_id, type_id, index, false);
								auto e = access_chain(ptr, indices.data(), uint32_t(indices.size()), mbr_type, nullptr,
								                      true);
								statement(temp_name, ".", to_member_name(*type, j), "[", k, "] = ", e, ";");
							}
						}
						else if (is_array(mbr_type))
						{
							for (uint32_t k = 0; k < mbr_type.array[0]; k++, index++)
							{
								set<SPIRConstant>(const_mbr_id, type_id, index, false);
								auto e = access_chain(ptr, indices.data(), uint32_t(indices.size()), mbr_type, nullptr,
								                      true);
								statement(temp_name, ".", to_member_name(*type, j), "[", k, "] = ", e, ";");
							}
						}
						else
						{
							set<SPIRConstant>(const_mbr_id, type_id, index, false);
							auto e =
							    access_chain(ptr, indices.data(), uint32_t(indices.size()), mbr_type, nullptr, true);
							statement(temp_name, ".", to_member_name(*type, j), " = ", e, ";");
						}
					}
				}
				else if (is_matrix(*type))
				{
					for (uint32_t j = 0; j < type->columns; j++, index++)
					{
						set<SPIRConstant>(const_mbr_id, type_id, index, false);
						auto e = access_chain(ptr, indices.data(), uint32_t(indices.size()), *type, nullptr, true);
						statement(temp_name, "[", j, "] = ", e, ";");
					}
				}
				else // Must be an array
				{
					assert(is_array(*type));
					for (uint32_t j = 0; j < type->array[0]; j++, index++)
					{
						set<SPIRConstant>(const_mbr_id, type_id, index, false);
						auto e = access_chain(ptr, indices.data(), uint32_t(indices.size()), *type, nullptr, true);
						statement(temp_name, "[", j, "] = ", e, ";");
					}
				}

				// This needs to be a variable instead of an expression so we don't
				// try to dereference this as a variable pointer.
				set<SPIRVariable>(ops[1], ops[0], var->storage);
				ir.meta[ops[1]] = ir.meta[ops[2]];
				set_name(ops[1], temp_name);
				if (has_decoration(var->self, DecorationInvariant))
					set_decoration(ops[1], DecorationInvariant);
				for (uint32_t j = 2; j < length; j++)
					inherit_expression_dependencies(ops[1], ops[j]);
				return true;
			}
			else
			{
				set<SPIRConstant>(const_mbr_id, type_id, index, false);
				indices.push_back(const_mbr_id);

				if (i < length)
					indices.insert(indices.end(), ops + i, ops + length);
			}
		}
		else
		{
			assert(index != uint32_t(-1));
			set<SPIRConstant>(const_mbr_id, type_id, index, false);
			indices.push_back(const_mbr_id);

			indices.insert(indices.end(), ops + 4, ops + length);
		}

		// We use the pointer to the base of the input/output array here,
		// so this is always a pointer chain.
		auto e = access_chain(ptr, indices.data(), uint32_t(indices.size()), get<SPIRType>(ops[0]), &meta, true);
		auto &expr = set<SPIRExpression>(ops[1], move(e), ops[0], should_forward(ops[2]));
		expr.loaded_from = var->self;
		expr.need_transpose = meta.need_transpose;
		expr.access_chain = true;

		// Mark the result as being packed if necessary.
		if (meta.storage_is_packed)
			set_extended_decoration(ops[1], SPIRVCrossDecorationPacked);
		if (meta.storage_packed_type != 0)
			set_extended_decoration(ops[1], SPIRVCrossDecorationPackedType, meta.storage_packed_type);
		if (meta.storage_is_invariant)
			set_decoration(ops[1], DecorationInvariant);

		for (uint32_t i = 2; i < length; i++)
		{
			inherit_expression_dependencies(ops[1], ops[i]);
			add_implied_read_expression(expr, ops[i]);
		}

		return true;
	}

	// If this is the inner tessellation level, and we're tessellating triangles,
	// drop the last index. It isn't an array in this case, so we can't have an
	// array reference here. We need to make this ID a variable instead of an
	// expression so we don't try to dereference it as a variable pointer.
	// Don't do this if the index is a constant 1, though. We need to drop stores
	// to that one.
	auto *m = ir.find_meta(var ? var->self : 0);
	if (get_execution_model() == ExecutionModelTessellationControl && var && m &&
	    m->decoration.builtin_type == BuiltInTessLevelInner && get_entry_point().flags.get(ExecutionModeTriangles))
	{
		auto *c = maybe_get<SPIRConstant>(ops[3]);
		if (c && c->scalar() == 1)
			return false;
		auto &dest_var = set<SPIRVariable>(ops[1], *var);
		dest_var.basetype = ops[0];
		ir.meta[ops[1]] = ir.meta[ops[2]];
		inherit_expression_dependencies(ops[1], ops[2]);
		return true;
	}

	return false;
}
bool CompilerMSL::is_out_of_bounds_tessellation_level(uint32_t id_lhs)
{
	if (!get_entry_point().flags.get(ExecutionModeTriangles))
		return false;

	// In SPIR-V, TessLevelInner always has two elements and TessLevelOuter always has
	// four. This is true even if we are tessellating triangles. This allows clients
	// to use a single tessellation control shader with multiple tessellation evaluation
	// shaders.
	// In Metal, however, only the first element of TessLevelInner and the first three
	// of TessLevelOuter are accessible. This stems from how in Metal, the tessellation
	// levels must be stored to a dedicated buffer in a particular format that depends
	// on the patch type. Therefore, in Triangles mode, any access to the second
	// inner level or the fourth outer level must be dropped.
	const auto *e = maybe_get<SPIRExpression>(id_lhs);
	if (!e || !e->access_chain)
		return false;
	BuiltIn builtin = BuiltIn(get_decoration(e->loaded_from, DecorationBuiltIn));
	if (builtin != BuiltInTessLevelInner && builtin != BuiltInTessLevelOuter)
		return false;
	auto *c = maybe_get<SPIRConstant>(e->implied_read_expressions[1]);
	if (!c)
		return false;
	return (builtin == BuiltInTessLevelInner && c->scalar() == 1) ||
	       (builtin == BuiltInTessLevelOuter && c->scalar() == 3);
}
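// Concretely, in Triangles mode a GLSL-style store to gl_TessLevelInner[1] or
// gl_TessLevelOuter[3] is detected here and silently discarded, since Metal's
// triangle tessellation factor buffer has no slot for those elements.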
// Override for MSL-specific syntax instructions
void CompilerMSL::emit_instruction(const Instruction &instruction)
{
#define MSL_BOP(op) emit_binary_op(ops[0], ops[1], ops[2], ops[3], #op)
#define MSL_BOP_CAST(op, type) \
	emit_binary_op_cast(ops[0], ops[1], ops[2], ops[3], #op, type, opcode_is_sign_invariant(opcode))
#define MSL_UOP(op) emit_unary_op(ops[0], ops[1], ops[2], #op)
#define MSL_QFOP(op) emit_quaternary_func_op(ops[0], ops[1], ops[2], ops[3], ops[4], ops[5], #op)
#define MSL_TFOP(op) emit_trinary_func_op(ops[0], ops[1], ops[2], ops[3], ops[4], #op)
#define MSL_BFOP(op) emit_binary_func_op(ops[0], ops[1], ops[2], ops[3], #op)
#define MSL_BFOP_CAST(op, type) \
	emit_binary_func_op_cast(ops[0], ops[1], ops[2], ops[3], #op, type, opcode_is_sign_invariant(opcode))
#define MSL_UFOP(op) emit_unary_func_op(ops[0], ops[1], ops[2], #op)
#define MSL_UNORD_BOP(op) emit_binary_unord_op(ops[0], ops[1], ops[2], ops[3], #op)

	auto ops = stream(instruction);
	auto opcode = static_cast<Op>(instruction.op);

	// If we need to do implicit bitcasts, make sure we do it with the correct type.
	uint32_t integer_width = get_integer_width_for_instruction(instruction);
	auto int_type = to_signed_basetype(integer_width);
	auto uint_type = to_unsigned_basetype(integer_width);
2016-10-27 22:47:17 +00:00
|
|
|
switch (opcode)
|
|
|
|
{
	// Comparisons
	case OpIEqual:
		MSL_BOP_CAST(==, int_type);
		break;

	case OpLogicalEqual:
	case OpFOrdEqual:
		MSL_BOP(==);
		break;

	case OpINotEqual:
		MSL_BOP_CAST(!=, int_type);
		break;

	case OpLogicalNotEqual:
	case OpFOrdNotEqual:
		MSL_BOP(!=);
		break;

	case OpUGreaterThan:
		MSL_BOP_CAST(>, uint_type);
		break;

	case OpSGreaterThan:
		MSL_BOP_CAST(>, int_type);
		break;

	case OpFOrdGreaterThan:
		MSL_BOP(>);
		break;

	case OpUGreaterThanEqual:
		MSL_BOP_CAST(>=, uint_type);
		break;

	case OpSGreaterThanEqual:
		MSL_BOP_CAST(>=, int_type);
		break;

	case OpFOrdGreaterThanEqual:
		MSL_BOP(>=);
		break;

	case OpULessThan:
		MSL_BOP_CAST(<, uint_type);
		break;

	case OpSLessThan:
		MSL_BOP_CAST(<, int_type);
		break;

	case OpFOrdLessThan:
		MSL_BOP(<);
		break;

	case OpULessThanEqual:
		MSL_BOP_CAST(<=, uint_type);
		break;

	case OpSLessThanEqual:
		MSL_BOP_CAST(<=, int_type);
		break;

	case OpFOrdLessThanEqual:
		MSL_BOP(<=);
		break;

	case OpFUnordEqual:
		MSL_UNORD_BOP(==);
		break;

	case OpFUnordNotEqual:
		MSL_UNORD_BOP(!=);
		break;

	case OpFUnordGreaterThan:
		MSL_UNORD_BOP(>);
		break;

	case OpFUnordGreaterThanEqual:
		MSL_UNORD_BOP(>=);
		break;

	case OpFUnordLessThan:
		MSL_UNORD_BOP(<);
		break;

	case OpFUnordLessThanEqual:
		MSL_UNORD_BOP(<=);
		break;

	// Derivatives
	case OpDPdx:
	case OpDPdxFine:
	case OpDPdxCoarse:
		MSL_UFOP(dfdx);
		register_control_dependent_expression(ops[1]);
		break;

	case OpDPdy:
	case OpDPdyFine:
	case OpDPdyCoarse:
		MSL_UFOP(dfdy);
		register_control_dependent_expression(ops[1]);
		break;

	case OpFwidth:
	case OpFwidthCoarse:
	case OpFwidthFine:
		MSL_UFOP(fwidth);
		register_control_dependent_expression(ops[1]);
		break;

	// Bitfield
	case OpBitFieldInsert:
		MSL_QFOP(insert_bits);
		break;

	case OpBitFieldSExtract:
	case OpBitFieldUExtract:
		MSL_TFOP(extract_bits);
		break;

	case OpBitReverse:
		MSL_UFOP(reverse_bits);
		break;

	case OpBitCount:
		MSL_UFOP(popcount);
		break;

	case OpFRem:
		MSL_BFOP(fmod);
		break;

	// Atomics
	case OpAtomicExchange:
	{
		uint32_t result_type = ops[0];
		uint32_t id = ops[1];
		uint32_t ptr = ops[2];
		uint32_t mem_sem = ops[4];
		uint32_t val = ops[5];
		emit_atomic_func_op(result_type, id, "atomic_exchange_explicit", mem_sem, mem_sem, false, ptr, val);
		break;
	}

	case OpAtomicCompareExchange:
	{
		uint32_t result_type = ops[0];
		uint32_t id = ops[1];
		uint32_t ptr = ops[2];
		uint32_t mem_sem_pass = ops[4];
		uint32_t mem_sem_fail = ops[5];
		uint32_t val = ops[6];
		uint32_t comp = ops[7];
		emit_atomic_func_op(result_type, id, "atomic_compare_exchange_weak_explicit", mem_sem_pass, mem_sem_fail, true,
		                    ptr, comp, true, false, val);
		break;
	}

	case OpAtomicCompareExchangeWeak:
		SPIRV_CROSS_THROW("OpAtomicCompareExchangeWeak is only supported in kernel profile.");

	case OpAtomicLoad:
	{
		uint32_t result_type = ops[0];
		uint32_t id = ops[1];
		uint32_t ptr = ops[2];
		uint32_t mem_sem = ops[4];
		emit_atomic_func_op(result_type, id, "atomic_load_explicit", mem_sem, mem_sem, false, ptr, 0);
		break;
	}

	case OpAtomicStore:
	{
		uint32_t result_type = expression_type(ops[0]).self;
		uint32_t id = ops[0];
		uint32_t ptr = ops[0];
		uint32_t mem_sem = ops[2];
		uint32_t val = ops[3];
		emit_atomic_func_op(result_type, id, "atomic_store_explicit", mem_sem, mem_sem, false, ptr, val);
		break;
	}

#define MSL_AFMO_IMPL(op, valsrc, valconst) \
	do \
	{ \
		uint32_t result_type = ops[0]; \
		uint32_t id = ops[1]; \
		uint32_t ptr = ops[2]; \
		uint32_t mem_sem = ops[4]; \
		uint32_t val = valsrc; \
		emit_atomic_func_op(result_type, id, "atomic_fetch_" #op "_explicit", mem_sem, mem_sem, false, ptr, val, \
		                    false, valconst); \
	} while (false)

#define MSL_AFMO(op) MSL_AFMO_IMPL(op, ops[5], false)
#define MSL_AFMIO(op) MSL_AFMO_IMPL(op, 1, true)

	case OpAtomicIIncrement:
		MSL_AFMIO(add);
		break;

	case OpAtomicIDecrement:
		MSL_AFMIO(sub);
		break;

	case OpAtomicIAdd:
		MSL_AFMO(add);
		break;

	case OpAtomicISub:
		MSL_AFMO(sub);
		break;

	case OpAtomicSMin:
	case OpAtomicUMin:
		MSL_AFMO(min);
		break;

	case OpAtomicSMax:
	case OpAtomicUMax:
		MSL_AFMO(max);
		break;

	case OpAtomicAnd:
		MSL_AFMO(and);
		break;

	case OpAtomicOr:
		MSL_AFMO(or);
		break;

	case OpAtomicXor:
		MSL_AFMO(xor);
		break;
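
	// Illustrative sketch (not part of the original source): for an OpAtomicIAdd
	// on a hypothetical device-buffer member buf.counter, emit_atomic_func_op()
	// below ultimately produces MSL along the lines of
	//     atomic_fetch_add_explicit((volatile device atomic_uint *)&buf.counter,
	//                               val, memory_order_relaxed);
	// with the address space taken from the backing variable and the memory
	// order pinned to relaxed (see get_memory_order()).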

	// Images

	// Reads == Fetches in Metal
	case OpImageRead:
	{
		// Mark that this shader reads from this image
		uint32_t img_id = ops[2];
		auto &type = expression_type(img_id);
		if (type.image.dim != DimSubpassData)
		{
			auto *p_var = maybe_get_backing_variable(img_id);
			if (p_var && has_decoration(p_var->self, DecorationNonReadable))
			{
				unset_decoration(p_var->self, DecorationNonReadable);
				force_recompile();
			}
		}

		emit_texture_op(instruction);
		break;
	}

	case OpImageWrite:
	{
		uint32_t img_id = ops[0];
		uint32_t coord_id = ops[1];
		uint32_t texel_id = ops[2];
		const uint32_t *opt = &ops[3];
		uint32_t length = instruction.length - 3;

		// Bypass pointers because we need the real image struct
		auto &type = expression_type(img_id);
		auto &img_type = get<SPIRType>(type.self);

		// Ensure this image has been marked as being written to and force a
		// recompile so that the image type output will include write access
		auto *p_var = maybe_get_backing_variable(img_id);
		if (p_var && has_decoration(p_var->self, DecorationNonWritable))
		{
			unset_decoration(p_var->self, DecorationNonWritable);
			force_recompile();
		}

		bool forward = false;
		uint32_t bias = 0;
		uint32_t lod = 0;
		uint32_t flags = 0;

		if (length)
		{
			flags = *opt++;
			length--;
		}

		auto test = [&](uint32_t &v, uint32_t flag) {
			if (length && (flags & flag))
			{
				v = *opt++;
				length--;
			}
		};

		test(bias, ImageOperandsBiasMask);
		test(lod, ImageOperandsLodMask);

		auto &texel_type = expression_type(texel_id);
		auto store_type = texel_type;
		store_type.vecsize = 4;

		statement(join(
		    to_expression(img_id), ".write(", remap_swizzle(store_type, texel_type.vecsize, to_expression(texel_id)),
		    ", ",
		    to_function_args(img_id, img_type, true, false, false, coord_id, 0, 0, 0, 0, lod, 0, 0, 0, 0, 0, 0, &forward),
		    ");"));

		if (p_var && variable_storage_is_aliased(*p_var))
			flush_all_aliased_variables();

		break;
	}
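
	// Illustrative sketch (not part of the original source): for a write to a
	// hypothetical 2D storage texture tex with texel color and coordinate coord,
	// the statement assembled above reads roughly
	//     tex.write(float4(color), uint2(coord));
	// with the texel widened to four components by remap_swizzle() and the
	// coordinate (and any LOD) arguments produced by to_function_args().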

	case OpImageQuerySize:
	case OpImageQuerySizeLod:
	{
		uint32_t rslt_type_id = ops[0];
		auto &rslt_type = get<SPIRType>(rslt_type_id);

		uint32_t id = ops[1];

		uint32_t img_id = ops[2];
		string img_exp = to_expression(img_id);
		auto &img_type = expression_type(img_id);
		Dim img_dim = img_type.image.dim;
		bool img_is_array = img_type.image.arrayed;

		if (img_type.basetype != SPIRType::Image)
			SPIRV_CROSS_THROW("Invalid type for OpImageQuerySize.");

		string lod;
		if (opcode == OpImageQuerySizeLod)
		{
			// LOD index defaults to zero, so don't bother outputting a level-zero index
			string decl_lod = to_expression(ops[3]);
			if (decl_lod != "0")
				lod = decl_lod;
		}

		string expr = type_to_glsl(rslt_type) + "(";
		expr += img_exp + ".get_width(" + lod + ")";

		if (img_dim == Dim2D || img_dim == DimCube || img_dim == Dim3D)
			expr += ", " + img_exp + ".get_height(" + lod + ")";

		if (img_dim == Dim3D)
			expr += ", " + img_exp + ".get_depth(" + lod + ")";

		if (img_is_array)
			expr += ", " + img_exp + ".get_array_size()";

		expr += ")";

		emit_op(rslt_type_id, id, expr, should_forward(img_id));

		break;
	}

	case OpImageQueryLod:
	{
		if (!msl_options.supports_msl_version(2, 2))
			SPIRV_CROSS_THROW("ImageQueryLod is only supported on MSL 2.2 and up.");
		uint32_t result_type = ops[0];
		uint32_t id = ops[1];
		uint32_t image_id = ops[2];
		uint32_t coord_id = ops[3];
		emit_uninitialized_temporary_expression(result_type, id);

		auto sampler_expr = to_sampler_expression(image_id);
		auto *combined = maybe_get<SPIRCombinedImageSampler>(image_id);
		auto image_expr = combined ? to_expression(combined->image) : to_expression(image_id);

		// TODO: It is unclear if calculate_clamped_lod also conditionally rounds
		// the reported LOD based on the sampler. NEAREST miplevel should
		// round the LOD, but LINEAR miplevel should not round.
		// Let's hope this does not become an issue ...
		statement(to_expression(id), ".x = ",
		          image_expr, ".calculate_clamped_lod(",
		          sampler_expr, ", ", to_expression(coord_id), ");");
		statement(to_expression(id), ".y = ",
		          image_expr, ".calculate_unclamped_lod(",
		          sampler_expr, ", ", to_expression(coord_id), ");");
		register_control_dependent_expression(id);
		break;
	}

#define MSL_ImgQry(qrytype) \
	do \
	{ \
		uint32_t rslt_type_id = ops[0]; \
		auto &rslt_type = get<SPIRType>(rslt_type_id); \
		uint32_t id = ops[1]; \
		uint32_t img_id = ops[2]; \
		string img_exp = to_expression(img_id); \
		string expr = type_to_glsl(rslt_type) + "(" + img_exp + ".get_num_" #qrytype "())"; \
		emit_op(rslt_type_id, id, expr, should_forward(img_id)); \
	} while (false)

	case OpImageQueryLevels:
		MSL_ImgQry(mip_levels);
		break;

	case OpImageQuerySamples:
		MSL_ImgQry(samples);
		break;

	case OpImage:
	{
		uint32_t result_type = ops[0];
		uint32_t id = ops[1];
		auto *combined = maybe_get<SPIRCombinedImageSampler>(ops[2]);

		if (combined)
		{
			auto &e = emit_op(result_type, id, to_expression(combined->image), true, true);
			auto *var = maybe_get_backing_variable(combined->image);
			if (var)
				e.loaded_from = var->self;
		}
		else
		{
			auto &e = emit_op(result_type, id, to_expression(ops[2]), true, true);
			auto *var = maybe_get_backing_variable(ops[2]);
			if (var)
				e.loaded_from = var->self;
		}
		break;
	}

	case OpImageTexelPointer:
		SPIRV_CROSS_THROW("MSL does not support atomic operations on images or texel buffers.");

	// Casting
	case OpQuantizeToF16:
	{
		uint32_t result_type = ops[0];
		uint32_t id = ops[1];
		uint32_t arg = ops[2];

		string exp;
		auto &type = get<SPIRType>(result_type);

		switch (type.vecsize)
		{
		case 1:
			exp = join("float(half(", to_expression(arg), "))");
			break;
		case 2:
			exp = join("float2(half2(", to_expression(arg), "))");
			break;
		case 3:
			exp = join("float3(half3(", to_expression(arg), "))");
			break;
		case 4:
			exp = join("float4(half4(", to_expression(arg), "))");
			break;
		default:
			SPIRV_CROSS_THROW("Illegal argument to OpQuantizeToF16.");
		}

		emit_op(result_type, id, exp, should_forward(arg));
		break;
	}

	case OpInBoundsAccessChain:
	case OpAccessChain:
	case OpPtrAccessChain:
		if (is_tessellation_shader())
		{
			if (!emit_tessellation_access_chain(ops, instruction.length))
				CompilerGLSL::emit_instruction(instruction);
		}
		else
			CompilerGLSL::emit_instruction(instruction);
		break;

	case OpStore:
		if (is_out_of_bounds_tessellation_level(ops[0]))
			break;

		if (maybe_emit_array_assignment(ops[0], ops[1]))
			break;

		CompilerGLSL::emit_instruction(instruction);
		break;

	// Compute barriers
	case OpMemoryBarrier:
		emit_barrier(0, ops[0], ops[1]);
		break;

	case OpControlBarrier:
		// In GLSL a memory barrier is often followed by a control barrier.
		// But in MSL, memory barriers are also control barriers, so don't
		// emit a simple control barrier if a memory barrier has just been emitted.
		if (previous_instruction_opcode != OpMemoryBarrier)
			emit_barrier(ops[0], ops[1], ops[2]);
		break;

	case OpVectorTimesMatrix:
	case OpMatrixTimesVector:
	{
		// If the matrix needs transpose and it is square or packed, just flip the multiply order.
		uint32_t mtx_id = ops[opcode == OpMatrixTimesVector ? 2 : 3];
		auto *e = maybe_get<SPIRExpression>(mtx_id);
		auto &t = expression_type(mtx_id);
		bool is_packed = has_extended_decoration(mtx_id, SPIRVCrossDecorationPacked);
		if (e && e->need_transpose && (t.columns == t.vecsize || is_packed))
		{
			e->need_transpose = false;

			// This is important for matrices. Packed matrices
			// are generally transposed, so unpacking using a constructor argument
			// will result in an error.
			// The simplest solution for now is to just avoid unpacking the matrix in this operation.
			unset_extended_decoration(mtx_id, SPIRVCrossDecorationPacked);

			emit_binary_op(ops[0], ops[1], ops[3], ops[2], "*");
			if (is_packed)
				set_extended_decoration(mtx_id, SPIRVCrossDecorationPacked);
			e->need_transpose = true;
		}
		else
			MSL_BOP(*);
		break;
	}
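
	// Illustrative sketch (not part of the original source): the flip above
	// exploits the identity transpose(M) * v == v * M in the generated MSL,
	// so instead of materializing a transpose it can emit, e.g.,
	//     out = v * M;   // rather than: out = transpose(M) * v;
	// which also sidesteps unpacking a packed matrix for this one operation.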

	// OpOuterProduct

	case OpIAddCarry:
	case OpISubBorrow:
	{
		uint32_t result_type = ops[0];
		uint32_t result_id = ops[1];
		uint32_t op0 = ops[2];
		uint32_t op1 = ops[3];
		forced_temporaries.insert(result_id);
		auto &type = get<SPIRType>(result_type);
		statement(variable_decl(type, to_name(result_id)), ";");
		set<SPIRExpression>(result_id, to_name(result_id), result_type, true);

		auto &res_type = get<SPIRType>(type.member_types[1]);
		if (opcode == OpIAddCarry)
		{
			statement(to_expression(result_id), ".", to_member_name(type, 0), " = ", to_enclosed_expression(op0), " + ",
			          to_enclosed_expression(op1), ";");
			statement(to_expression(result_id), ".", to_member_name(type, 1), " = select(", type_to_glsl(res_type),
			          "(1), ", type_to_glsl(res_type), "(0), ", to_expression(result_id), ".", to_member_name(type, 0),
			          " >= max(", to_expression(op0), ", ", to_expression(op1), "));");
		}
		else
		{
			statement(to_expression(result_id), ".", to_member_name(type, 0), " = ", to_enclosed_expression(op0), " - ",
			          to_enclosed_expression(op1), ";");
			statement(to_expression(result_id), ".", to_member_name(type, 1), " = select(", type_to_glsl(res_type),
			          "(1), ", type_to_glsl(res_type), "(0), ", to_enclosed_expression(op0),
			          " >= ", to_enclosed_expression(op1), ");");
		}
		break;
	}
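
	// Illustrative sketch (not part of the original source): for OpIAddCarry on
	// hypothetical uints a and b, the two statements above emit roughly
	//     r.first = a + b;
	//     r.second = select(uint(1), uint(0), r.first >= max(a, b));
	// relying on unsigned wraparound: the sum is smaller than both operands
	// exactly when a carry occurred (member names here are placeholders).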

	case OpUMulExtended:
	case OpSMulExtended:
	{
		uint32_t result_type = ops[0];
		uint32_t result_id = ops[1];
		uint32_t op0 = ops[2];
		uint32_t op1 = ops[3];
		forced_temporaries.insert(result_id);
		auto &type = get<SPIRType>(result_type);
		statement(variable_decl(type, to_name(result_id)), ";");
		set<SPIRExpression>(result_id, to_name(result_id), result_type, true);

		statement(to_expression(result_id), ".", to_member_name(type, 0), " = ", to_enclosed_expression(op0), " * ",
		          to_enclosed_expression(op1), ";");
		statement(to_expression(result_id), ".", to_member_name(type, 1), " = mulhi(", to_expression(op0), ", ",
		          to_expression(op1), ");");
		break;
	}

	case OpArrayLength:
	{
		auto &type = expression_type(ops[2]);
		uint32_t offset = type_struct_member_offset(type, ops[3]);
		uint32_t stride = type_struct_member_array_stride(type, ops[3]);

		auto expr = join("(", to_buffer_size_expression(ops[2]), " - ", offset, ") / ", stride);
		emit_op(ops[0], ops[1], expr, true);
		break;
	}
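
	// Illustrative sketch (not part of the original source): for a runtime
	// array at byte offset 16 with a 4-byte stride, and a buffer-size value
	// passed in a hypothetical bufferSize argument, the expression built above
	// is simply
	//     (bufferSize - 16) / 4
	// i.e. the element count of the trailing array, computed from the size
	// reported for the bound Metal buffer.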

	default:
		CompilerGLSL::emit_instruction(instruction);
		break;
	}

	previous_instruction_opcode = opcode;
}

void CompilerMSL::emit_barrier(uint32_t id_exe_scope, uint32_t id_mem_scope, uint32_t id_mem_sem)
{
	if (get_execution_model() != ExecutionModelGLCompute && get_execution_model() != ExecutionModelTessellationControl)
		return;

	uint32_t exe_scope = id_exe_scope ? get<SPIRConstant>(id_exe_scope).scalar() : uint32_t(ScopeInvocation);
	uint32_t mem_scope = id_mem_scope ? get<SPIRConstant>(id_mem_scope).scalar() : uint32_t(ScopeInvocation);
	// Use the wider of the two scopes (smaller value)
	exe_scope = min(exe_scope, mem_scope);

	string bar_stmt;
	if ((msl_options.is_ios() && msl_options.supports_msl_version(1, 2)) || msl_options.supports_msl_version(2))
		bar_stmt = exe_scope < ScopeSubgroup ? "threadgroup_barrier" : "simdgroup_barrier";
	else
		bar_stmt = "threadgroup_barrier";
	bar_stmt += "(";

	uint32_t mem_sem = id_mem_sem ? get<SPIRConstant>(id_mem_sem).scalar() : uint32_t(MemorySemanticsMaskNone);

	// Use the | operator to combine flags if we can.
	if (msl_options.supports_msl_version(1, 2))
	{
		string mem_flags = "";
		// For tesc shaders, this also affects objects in the Output storage class.
		// Since in Metal, these are placed in a device buffer, we have to sync device memory here.
		if (get_execution_model() == ExecutionModelTessellationControl ||
		    (mem_sem & (MemorySemanticsUniformMemoryMask | MemorySemanticsCrossWorkgroupMemoryMask)))
			mem_flags += "mem_flags::mem_device";
		if (mem_sem & (MemorySemanticsSubgroupMemoryMask | MemorySemanticsWorkgroupMemoryMask |
		               MemorySemanticsAtomicCounterMemoryMask))
		{
			if (!mem_flags.empty())
				mem_flags += " | ";
			mem_flags += "mem_flags::mem_threadgroup";
		}
		if (mem_sem & MemorySemanticsImageMemoryMask)
		{
			if (!mem_flags.empty())
				mem_flags += " | ";
			mem_flags += "mem_flags::mem_texture";
		}

		if (mem_flags.empty())
			mem_flags = "mem_flags::mem_none";

		bar_stmt += mem_flags;
	}
	else
	{
		if ((mem_sem & (MemorySemanticsUniformMemoryMask | MemorySemanticsCrossWorkgroupMemoryMask)) &&
		    (mem_sem & (MemorySemanticsSubgroupMemoryMask | MemorySemanticsWorkgroupMemoryMask |
		                MemorySemanticsAtomicCounterMemoryMask)))
			bar_stmt += "mem_flags::mem_device_and_threadgroup";
		else if (mem_sem & (MemorySemanticsUniformMemoryMask | MemorySemanticsCrossWorkgroupMemoryMask))
			bar_stmt += "mem_flags::mem_device";
		else if (mem_sem & (MemorySemanticsSubgroupMemoryMask | MemorySemanticsWorkgroupMemoryMask |
		                    MemorySemanticsAtomicCounterMemoryMask))
			bar_stmt += "mem_flags::mem_threadgroup";
		else if (mem_sem & MemorySemanticsImageMemoryMask)
			bar_stmt += "mem_flags::mem_texture";
		else
			bar_stmt += "mem_flags::mem_none";
	}

	if (msl_options.is_ios() && (msl_options.supports_msl_version(2) && !msl_options.supports_msl_version(2, 1)))
	{
		bar_stmt += ", ";

		switch (mem_scope)
		{
		case ScopeCrossDevice:
		case ScopeDevice:
			bar_stmt += "memory_scope_device";
			break;

		case ScopeSubgroup:
		case ScopeInvocation:
			bar_stmt += "memory_scope_simdgroup";
			break;

		case ScopeWorkgroup:
		default:
			bar_stmt += "memory_scope_threadgroup";
			break;
		}
	}

	bar_stmt += ");";

	statement(bar_stmt);

	assert(current_emitting_block);
	flush_control_dependent_expressions(current_emitting_block->self);
	flush_all_active_variables();
}
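
// Illustrative sketch (not part of the original source): a GLSL barrier() with
// shared-memory semantics in a compute shader typically leaves this function as
//     threadgroup_barrier(mem_flags::mem_threadgroup);
// while semantics that cover both buffer and shared memory on MSL 1.2+ combine
// flags, e.g.
//     threadgroup_barrier(mem_flags::mem_device | mem_flags::mem_threadgroup);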

void CompilerMSL::emit_array_copy(const string &lhs, uint32_t rhs_id)
{
	// Assignment from an array initializer is fine.
	auto &type = expression_type(rhs_id);
	auto *var = maybe_get_backing_variable(rhs_id);

	// Unfortunately, we cannot template on address space in MSL,
	// so explicit address space redirection it is ...
	bool is_constant = false;
	if (ir.ids[rhs_id].get_type() == TypeConstant)
	{
		is_constant = true;
	}
	else if (var && var->remapped_variable && var->statically_assigned &&
	         ir.ids[var->static_expression].get_type() == TypeConstant)
	{
		is_constant = true;
	}

	// For the case where we have OpLoad triggering an array copy,
	// we cannot easily detect this case ahead of time since it's
	// context dependent. We might have to force a recompile here
	// if this is the only use of array copies in our shader.
	if (type.array.size() > 1)
	{
		if (type.array.size() > SPVFuncImplArrayCopyMultidimMax)
			SPIRV_CROSS_THROW("Cannot support this many dimensions for arrays of arrays.");
		auto func = static_cast<SPVFuncImpl>(SPVFuncImplArrayCopyMultidimBase + type.array.size());
		if (spv_function_implementations.count(func) == 0)
		{
			spv_function_implementations.insert(func);
			suppress_missing_prototypes = true;
			force_recompile();
		}
	}
	else if (spv_function_implementations.count(SPVFuncImplArrayCopy) == 0)
	{
		spv_function_implementations.insert(SPVFuncImplArrayCopy);
		suppress_missing_prototypes = true;
		force_recompile();
	}

	const char *tag = is_constant ? "FromConstant" : "FromStack";
	statement("spvArrayCopy", tag, type.array.size(), "(", lhs, ", ", to_expression(rhs_id), ");");
}

// Since MSL does not allow arrays to be copied via simple variable assignment,
// if the LHS and RHS represent an assignment of an entire array, it must be
// implemented by calling an array copy function.
// Returns whether the array assignment was emitted.
bool CompilerMSL::maybe_emit_array_assignment(uint32_t id_lhs, uint32_t id_rhs)
{
	// We only care about assignments of an entire array
	auto &type = expression_type(id_rhs);
	if (type.array.size() == 0)
		return false;

	auto *var = maybe_get<SPIRVariable>(id_lhs);

	// Is this a remapped, static constant? Don't do anything.
	if (var && var->remapped_variable && var->statically_assigned)
		return true;

	if (ir.ids[id_rhs].get_type() == TypeConstant && var && var->deferred_declaration)
	{
		// Special case, if we end up declaring a variable when assigning the constant array,
		// we can avoid the copy by directly assigning the constant expression.
		// This is likely necessary to be able to use a variable as a true look-up table, as it is unlikely
		// the compiler will be able to optimize the spvArrayCopy() into a constant LUT.
		// After a variable has been declared, we can no longer assign constant arrays in MSL unfortunately.
		statement(to_expression(id_lhs), " = ", constant_expression(get<SPIRConstant>(id_rhs)), ";");
		return true;
	}

	// Ensure the LHS variable has been declared
	auto *p_v_lhs = maybe_get_backing_variable(id_lhs);
	if (p_v_lhs)
		flush_variable_declaration(p_v_lhs->self);

	emit_array_copy(to_expression(id_lhs), id_rhs);
	register_write(id_lhs);

	return true;
}
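
// Illustrative sketch (not part of the original source): an assignment of one
// float[4] array variable to another thus becomes a call to the emitted helper,
// roughly
//     spvArrayCopyFromStack1(dst, src);
// where the FromStack/FromConstant suffix and the trailing dimension count are
// chosen in emit_array_copy() above (dst and src are placeholder names).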

// Emits one of the atomic functions. In MSL, the atomic functions operate on pointers
void CompilerMSL::emit_atomic_func_op(uint32_t result_type, uint32_t result_id, const char *op, uint32_t mem_order_1,
                                      uint32_t mem_order_2, bool has_mem_order_2, uint32_t obj, uint32_t op1,
                                      bool op1_is_pointer, bool op1_is_literal, uint32_t op2)
{
	forced_temporaries.insert(result_id);

	string exp = string(op) + "(";

	auto &type = get_pointee_type(expression_type(obj));
	exp += "(volatile ";
	auto *var = maybe_get_backing_variable(obj);
	if (!var)
		SPIRV_CROSS_THROW("No backing variable for atomic operation.");
	exp += get_argument_address_space(*var);
	exp += " atomic_";
	exp += type_to_glsl(type);
	exp += "*)";

	exp += "&";
	exp += to_enclosed_expression(obj);

	bool is_atomic_compare_exchange_strong = op1_is_pointer && op1;

	if (is_atomic_compare_exchange_strong)
	{
		assert(strcmp(op, "atomic_compare_exchange_weak_explicit") == 0);
		assert(op2);
		assert(has_mem_order_2);
		exp += ", &";
		exp += to_name(result_id);
		exp += ", ";
		exp += to_expression(op2);
		exp += ", ";
		exp += get_memory_order(mem_order_1);
		exp += ", ";
		exp += get_memory_order(mem_order_2);
		exp += ")";

		// MSL only supports the weak atomic compare exchange, so emit a CAS loop here.
		// The MSL function returns false if the atomic write fails OR the comparison test fails,
		// so we must validate that it wasn't the comparison test that failed before continuing
		// the CAS loop, otherwise it will loop infinitely, with the comparison test always failing.
		// The function updates the comparator value from the memory value, so the additional
		// comparison test evaluates the memory value against the expected value.
		statement(variable_decl(type, to_name(result_id)), ";");
		statement("do");
		begin_scope();
		statement(to_name(result_id), " = ", to_expression(op1), ";");
		end_scope_decl(join("while (!", exp, " && ", to_name(result_id), " == ", to_enclosed_expression(op1), ")"));
		set<SPIRExpression>(result_id, to_name(result_id), result_type, true);
	}
	else
	{
		assert(strcmp(op, "atomic_compare_exchange_weak_explicit") != 0);
		if (op1)
		{
			if (op1_is_literal)
				exp += join(", ", op1);
			else
				exp += ", " + to_expression(op1);
		}
		if (op2)
			exp += ", " + to_expression(op2);

		exp += string(", ") + get_memory_order(mem_order_1);
		if (has_mem_order_2)
			exp += string(", ") + get_memory_order(mem_order_2);

		exp += ")";
		emit_op(result_type, result_id, exp, false);
	}

	flush_all_atomic_capable_variables();
}
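
// Illustrative sketch (not part of the original source): for an
// OpAtomicCompareExchange on a hypothetical device-buffer member buf.value,
// the CAS loop built above emits MSL along the lines of
//     uint _res;
//     do
//     {
//         _res = expected;
//     } while (!atomic_compare_exchange_weak_explicit(
//                  (volatile device atomic_uint *)&buf.value, &_res, desired,
//                  memory_order_relaxed, memory_order_relaxed) &&
//              _res == expected);
// The trailing comparison distinguishes a spurious weak failure (retry) from a
// genuine value mismatch (stop), since the intrinsic overwrites _res with the
// value found in memory.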

// Metal only supports relaxed memory order for now
const char *CompilerMSL::get_memory_order(uint32_t)
{
	return "memory_order_relaxed";
}

// Override for MSL-specific extension syntax instructions
void CompilerMSL::emit_glsl_op(uint32_t result_type, uint32_t id, uint32_t eop, const uint32_t *args, uint32_t count)
{
	auto op = static_cast<GLSLstd450>(eop);

	// If we need to do implicit bitcasts, make sure we do it with the correct type.
	uint32_t integer_width = get_integer_width_for_glsl_instruction(op, args, count);
	auto int_type = to_signed_basetype(integer_width);
	auto uint_type = to_unsigned_basetype(integer_width);

	switch (op)
	{
	case GLSLstd450Atan2:
		emit_binary_func_op(result_type, id, args[0], args[1], "atan2");
		break;
	case GLSLstd450InverseSqrt:
		emit_unary_func_op(result_type, id, args[0], "rsqrt");
		break;
	case GLSLstd450RoundEven:
		emit_unary_func_op(result_type, id, args[0], "rint");
		break;

	case GLSLstd450FindSMsb:
		emit_unary_func_op_cast(result_type, id, args[0], "findSMSB", int_type, int_type);
		break;

	case GLSLstd450FindUMsb:
		emit_unary_func_op_cast(result_type, id, args[0], "findUMSB", uint_type, uint_type);
		break;

	case GLSLstd450PackSnorm4x8:
		emit_unary_func_op(result_type, id, args[0], "pack_float_to_snorm4x8");
		break;
	case GLSLstd450PackUnorm4x8:
		emit_unary_func_op(result_type, id, args[0], "pack_float_to_unorm4x8");
		break;
	case GLSLstd450PackSnorm2x16:
		emit_unary_func_op(result_type, id, args[0], "pack_float_to_snorm2x16");
		break;
	case GLSLstd450PackUnorm2x16:
		emit_unary_func_op(result_type, id, args[0], "pack_float_to_unorm2x16");
		break;

	case GLSLstd450PackHalf2x16:
	{
		auto expr = join("as_type<uint>(half2(", to_expression(args[0]), "))");
		emit_op(result_type, id, expr, should_forward(args[0]));
		inherit_expression_dependencies(id, args[0]);
		break;
	}

	case GLSLstd450UnpackSnorm4x8:
		emit_unary_func_op(result_type, id, args[0], "unpack_snorm4x8_to_float");
		break;
	case GLSLstd450UnpackUnorm4x8:
		emit_unary_func_op(result_type, id, args[0], "unpack_unorm4x8_to_float");
		break;
	case GLSLstd450UnpackSnorm2x16:
		emit_unary_func_op(result_type, id, args[0], "unpack_snorm2x16_to_float");
		break;
	case GLSLstd450UnpackUnorm2x16:
		emit_unary_func_op(result_type, id, args[0], "unpack_unorm2x16_to_float");
		break;

	case GLSLstd450UnpackHalf2x16:
	{
		auto expr = join("float2(as_type<half2>(", to_expression(args[0]), "))");
		emit_op(result_type, id, expr, should_forward(args[0]));
		inherit_expression_dependencies(id, args[0]);
		break;
	}

	case GLSLstd450PackDouble2x32:
		emit_unary_func_op(result_type, id, args[0], "unsupported_GLSLstd450PackDouble2x32"); // Currently unsupported
		break;
	case GLSLstd450UnpackDouble2x32:
		emit_unary_func_op(result_type, id, args[0], "unsupported_GLSLstd450UnpackDouble2x32"); // Currently unsupported
		break;

	case GLSLstd450MatrixInverse:
	{
		auto &mat_type = get<SPIRType>(result_type);
		switch (mat_type.columns)
		{
		case 2:
			emit_unary_func_op(result_type, id, args[0], "spvInverse2x2");
			break;
		case 3:
			emit_unary_func_op(result_type, id, args[0], "spvInverse3x3");
			break;
		case 4:
			emit_unary_func_op(result_type, id, args[0], "spvInverse4x4");
			break;
		default:
			break;
		}
		break;
	}

	case GLSLstd450FMin:
		// If the result type isn't float, don't bother calling the specific
		// precise::/fast:: version. Metal doesn't have those for half and
		// double types.
		if (get<SPIRType>(result_type).basetype != SPIRType::Float)
			emit_binary_func_op(result_type, id, args[0], args[1], "min");
		else
			emit_binary_func_op(result_type, id, args[0], args[1], "fast::min");
		break;

	case GLSLstd450FMax:
		if (get<SPIRType>(result_type).basetype != SPIRType::Float)
			emit_binary_func_op(result_type, id, args[0], args[1], "max");
		else
			emit_binary_func_op(result_type, id, args[0], args[1], "fast::max");
		break;

	case GLSLstd450FClamp:
		// TODO: If args[1] is 0 and args[2] is 1, emit a saturate() call.
		if (get<SPIRType>(result_type).basetype != SPIRType::Float)
			emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "clamp");
		else
			emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "fast::clamp");
		break;

	case GLSLstd450NMin:
		if (get<SPIRType>(result_type).basetype != SPIRType::Float)
			emit_binary_func_op(result_type, id, args[0], args[1], "min");
		else
			emit_binary_func_op(result_type, id, args[0], args[1], "precise::min");
		break;

	case GLSLstd450NMax:
		if (get<SPIRType>(result_type).basetype != SPIRType::Float)
			emit_binary_func_op(result_type, id, args[0], args[1], "max");
		else
			emit_binary_func_op(result_type, id, args[0], args[1], "precise::max");
		break;

	case GLSLstd450NClamp:
		// TODO: If args[1] is 0 and args[2] is 1, emit a saturate() call.
		if (get<SPIRType>(result_type).basetype != SPIRType::Float)
			emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "clamp");
		else
			emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "precise::clamp");
		break;

	// TODO:
	// GLSLstd450InterpolateAtCentroid (centroid_no_perspective qualifier)
	// GLSLstd450InterpolateAtSample (sample_no_perspective qualifier)
	// GLSLstd450InterpolateAtOffset

	default:
		CompilerGLSL::emit_glsl_op(result_type, id, eop, args, count);
		break;
	}
}
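
// Illustrative sketch (not part of the original source): the fast::/precise::
// split above mirrors the NaN rules of the two GLSL op families for floats:
//     fast::min(x, y);    // GLSLstd450FMin: result undefined if x or y is NaN
//     precise::min(x, y); // GLSLstd450NMin: IEEE 754-style, non-NaN operand wins
// half and double operands fall back to plain min(), the only flavor Metal
// provides for those types.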

// Emit a structure declaration for the specified interface variable.
void CompilerMSL::emit_interface_block(uint32_t ib_var_id)
{
	if (ib_var_id)
	{
		auto &ib_var = get<SPIRVariable>(ib_var_id);
		auto &ib_type = get_variable_data_type(ib_var);
		assert(ib_type.basetype == SPIRType::Struct && !ib_type.member_types.empty());
		emit_struct(ib_type);
	}
}
|
|
|
|
|
|
|
// Emits the declaration signature of the specified function.
|
|
|
|
// If this is the entry point function, Metal-specific return value and function arguments are added.
|
2018-03-12 12:09:25 +00:00
|
|
|
void CompilerMSL::emit_function_prototype(SPIRFunction &func, const Bitset &)
|
2016-04-06 21:42:27 +00:00
|
|
|
{
|
2018-10-05 09:30:57 +00:00
|
|
|
if (func.self != ir.default_entry_point)
|
2018-02-23 13:13:46 +00:00
|
|
|
add_function_overload(func);
|
|
|
|
|
2016-05-23 10:25:09 +00:00
|
|
|
local_variable_names = resource_names;
|
2016-05-05 07:33:18 +00:00
|
|
|
string decl;
|
|
|
|
|
2018-10-05 09:30:57 +00:00
|
|
|
processing_entry_point = (func.self == ir.default_entry_point);
|
2016-05-05 07:33:18 +00:00
|
|
|
|
|
|
|
auto &type = get<SPIRType>(func.return_type);
|
2018-02-05 11:37:41 +00:00
|
|
|
|
|
|
|
if (type.array.empty())
|
|
|
|
{
|
|
|
|
decl += func_type_decl(type);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
// We cannot return arrays in MSL, so "return" through an out variable.
|
2018-02-05 11:51:18 +00:00
|
|
|
decl = "void";
|
2018-02-05 11:37:41 +00:00
|
|
|
}
|
2016-05-05 07:33:18 +00:00
|
|
|
|
|
|
|
decl += " ";
|
2017-11-06 02:34:42 +00:00
|
|
|
decl += to_name(func.self);
|
2016-05-05 07:33:18 +00:00
|
|
|
decl += "(";
|
|
|
|
|
2018-02-05 11:37:41 +00:00
|
|
|
if (!type.array.empty())
|
|
|
|
{
|
|
|
|
// Fake arrays returns by writing to an out array instead.
|
2018-02-05 12:03:40 +00:00
|
|
|
decl += "thread ";
|
2018-02-05 11:37:41 +00:00
|
|
|
decl += type_to_glsl(type);
|
|
|
|
decl += " (&SPIRV_Cross_return_value)";
|
|
|
|
decl += type_to_array_glsl(type);
		if (!func.arguments.empty())
			decl += ", ";
	}

	if (processing_entry_point)
	{
		if (msl_options.argument_buffers)
			decl += entry_point_args_argument_buffer(!func.arguments.empty());
		else
			decl += entry_point_args_classic(!func.arguments.empty());

		// If the entry point function has variables that require early declaration,
		// ensure they each have an empty initializer, creating one if needed.
		// This is done at this late stage because the initialization expression
		// is cleared after each compilation pass.
		for (auto var_id : vars_needing_early_declaration)
		{
			auto &ed_var = get<SPIRVariable>(var_id);
			uint32_t &initializer = ed_var.initializer;
			if (!initializer)
				initializer = ir.increase_bound_by(1);

			// Do not override proper initializers.
			if (ir.ids[initializer].get_type() == TypeNone || ir.ids[initializer].get_type() == TypeExpression)
				set<SPIRExpression>(ed_var.initializer, "{}", ed_var.basetype, true);
		}
	}

	for (auto &arg : func.arguments)
	{
		uint32_t name_id = arg.id;

		auto *var = maybe_get<SPIRVariable>(arg.id);
		if (var)
		{
			// If we need to modify the name of the variable, make sure we modify the original variable.
			// Our alias is just a shadow variable.
			if (arg.alias_global_variable && var->basevariable)
				name_id = var->basevariable;

			var->parameter = &arg; // Hold a pointer to the parameter so we can invalidate the readonly field if needed.
		}

		add_local_variable_name(name_id);

		decl += argument_decl(arg);

		// Manufacture automatic sampler arg for SampledImage texture
		auto &arg_type = get<SPIRType>(arg.type);
		if (arg_type.basetype == SPIRType::SampledImage && arg_type.image.dim != DimBuffer)
			decl += join(", thread const ", sampler_type(arg_type), " ", to_sampler_expression(arg.id));

		// Manufacture automatic swizzle arg.
		if (msl_options.swizzle_texture_samples && has_sampled_images && is_sampled_image_type(arg_type))
		{
			bool arg_is_array = !arg_type.array.empty();
			decl += join(", constant uint", arg_is_array ? "* " : "& ", to_swizzle_expression(arg.id));
		}

		if (buffers_requiring_array_length.count(name_id))
		{
			bool arg_is_array = !arg_type.array.empty();
			decl += join(", constant uint", arg_is_array ? "* " : "& ", to_buffer_size_expression(name_id));
		}

		if (&arg != &func.arguments.back())
			decl += ", ";
	}

	decl += ")";
	statement(decl);
}

// Returns the texture sampling function string for the specified image and sampling characteristics.
string CompilerMSL::to_function_name(uint32_t img, const SPIRType &imgtype, bool is_fetch, bool is_gather, bool, bool,
                                     bool has_offset, bool, bool has_dref, uint32_t, uint32_t)
{
	// Special-case gather. We have to alter the component being looked up
	// in the swizzle case.
	if (msl_options.swizzle_texture_samples && is_gather)
	{
		string fname = imgtype.image.depth ? "spvGatherCompareSwizzle" : "spvGatherSwizzle";
		fname += "<" + type_to_glsl(get<SPIRType>(imgtype.image.type)) + ", metal::" + type_to_glsl(imgtype);
		// Add the arg types ourselves. Yes, this sucks, but Clang can't
		// deduce template pack parameters in the middle of an argument list.
		switch (imgtype.image.dim)
		{
		case Dim2D:
			fname += ", float2";
			if (imgtype.image.arrayed)
				fname += ", uint";
			if (imgtype.image.depth)
				fname += ", float";
			if (!imgtype.image.depth || has_offset)
				fname += ", int2";
			break;
		case DimCube:
			fname += ", float3";
			if (imgtype.image.arrayed)
				fname += ", uint";
			if (imgtype.image.depth)
				fname += ", float";
			break;
		default:
			SPIRV_CROSS_THROW("Invalid texture dimension for gather op.");
		}
		fname += ">";
		return fname;
	}

	auto *combined = maybe_get<SPIRCombinedImageSampler>(img);

	// Texture reference
	string fname = to_expression(combined ? combined->image : img) + ".";
	if (msl_options.swizzle_texture_samples && !is_gather && is_sampled_image_type(imgtype))
		fname = "spvTextureSwizzle(" + fname;

	// Texture function and sampler
	if (is_fetch)
		fname += "read";
	else if (is_gather)
		fname += "gather";
	else
		fname += "sample";

	if (has_dref)
		fname += "_compare";

	return fname;
}

// Returns the function args for a texture sampling function for the specified image and sampling characteristics.
string CompilerMSL::to_function_args(uint32_t img, const SPIRType &imgtype, bool is_fetch, bool is_gather, bool is_proj,
                                     uint32_t coord, uint32_t, uint32_t dref, uint32_t grad_x, uint32_t grad_y,
                                     uint32_t lod, uint32_t coffset, uint32_t offset, uint32_t bias, uint32_t comp,
                                     uint32_t sample, uint32_t minlod, bool *p_forward)
{
	string farg_str;
	if (!is_fetch)
		farg_str += to_sampler_expression(img);

	if (msl_options.swizzle_texture_samples && is_gather)
	{
		if (!farg_str.empty())
			farg_str += ", ";

		auto *combined = maybe_get<SPIRCombinedImageSampler>(img);
		farg_str += to_expression(combined ? combined->image : img);
	}

	// Texture coordinates
	bool forward = should_forward(coord);
	auto coord_expr = to_enclosed_expression(coord);
	auto &coord_type = expression_type(coord);
	bool coord_is_fp = type_is_floating_point(coord_type);
	bool is_cube_fetch = false;

	string tex_coords = coord_expr;
	uint32_t alt_coord_component = 0;

	switch (imgtype.image.dim)
	{

	case Dim1D:
		if (coord_type.vecsize > 1)
			tex_coords = enclose_expression(tex_coords) + ".x";

		if (is_fetch)
			tex_coords = "uint(" + round_fp_tex_coords(tex_coords, coord_is_fp) + ")";

		alt_coord_component = 1;
		break;

	case DimBuffer:
		if (coord_type.vecsize > 1)
			tex_coords = enclose_expression(tex_coords) + ".x";

		if (msl_options.texture_buffer_native)
		{
			tex_coords = "uint(" + round_fp_tex_coords(tex_coords, coord_is_fp) + ")";
		}
		else
		{
			// Metal texel buffer textures are 2D, so convert 1D coord to 2D.
			if (is_fetch)
				tex_coords = "spvTexelBufferCoord(" + round_fp_tex_coords(tex_coords, coord_is_fp) + ")";
		}

		alt_coord_component = 1;
		break;

	case DimSubpassData:
		if (imgtype.image.ms)
			tex_coords = "uint2(gl_FragCoord.xy)";
		else
			tex_coords = join("uint2(gl_FragCoord.xy), 0");
		break;

	case Dim2D:
		if (coord_type.vecsize > 2)
			tex_coords = enclose_expression(tex_coords) + ".xy";

		if (is_fetch)
			tex_coords = "uint2(" + round_fp_tex_coords(tex_coords, coord_is_fp) + ")";

		alt_coord_component = 2;
		break;

	case Dim3D:
		if (coord_type.vecsize > 3)
			tex_coords = enclose_expression(tex_coords) + ".xyz";

		if (is_fetch)
			tex_coords = "uint3(" + round_fp_tex_coords(tex_coords, coord_is_fp) + ")";

		alt_coord_component = 3;
		break;

	case DimCube:
		if (is_fetch)
		{
			is_cube_fetch = true;
			tex_coords += ".xy";
			tex_coords = "uint2(" + round_fp_tex_coords(tex_coords, coord_is_fp) + ")";
		}
		else
		{
			if (coord_type.vecsize > 3)
				tex_coords = enclose_expression(tex_coords) + ".xyz";
		}

		alt_coord_component = 3;
		break;

	default:
		break;
	}

	if (is_fetch && offset)
	{
		// Fetch offsets must be applied directly to the coordinate.
		forward = forward && should_forward(offset);
		auto &type = expression_type(offset);
		if (type.basetype != SPIRType::UInt)
			tex_coords += " + " + bitcast_expression(SPIRType::UInt, offset);
		else
			tex_coords += " + " + to_enclosed_expression(offset);
	}
	else if (is_fetch && coffset)
	{
		// Fetch offsets must be applied directly to the coordinate.
		forward = forward && should_forward(coffset);
		auto &type = expression_type(coffset);
		if (type.basetype != SPIRType::UInt)
			tex_coords += " + " + bitcast_expression(SPIRType::UInt, coffset);
		else
			tex_coords += " + " + to_enclosed_expression(coffset);
	}

	// If projection, use alt coord as divisor
	if (is_proj)
		tex_coords += " / " + to_extract_component_expression(coord, alt_coord_component);

	if (!farg_str.empty())
		farg_str += ", ";
	farg_str += tex_coords;

	// If fetch from cube, add face explicitly
	if (is_cube_fetch)
	{
		// Special case for cube arrays, face and layer are packed in one dimension.
		if (imgtype.image.arrayed)
			farg_str += ", uint(" + to_extract_component_expression(coord, 2) + ") % 6u";
		else
			farg_str += ", uint(" + round_fp_tex_coords(to_extract_component_expression(coord, 2), coord_is_fp) + ")";
	}
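	// The packed cube-array index is effectively (layer * 6 + face): % 6u above extracts
	// the face, and / 6u in the arrayed branch below extracts the layer.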

	// If array, use alt coord
	if (imgtype.image.arrayed)
	{
		// Special case for cube arrays, face and layer are packed in one dimension.
		if (imgtype.image.dim == DimCube && is_fetch)
			farg_str += ", uint(" + to_extract_component_expression(coord, 2) + ") / 6u";
		else
			farg_str += ", uint(" +
			            round_fp_tex_coords(to_extract_component_expression(coord, alt_coord_component), coord_is_fp) +
			            ")";
	}

	// Depth compare reference value
	if (dref)
	{
		forward = forward && should_forward(dref);
		farg_str += ", ";

		if (is_proj)
			farg_str +=
			    to_enclosed_expression(dref) + " / " + to_extract_component_expression(coord, alt_coord_component);
		else
			farg_str += to_expression(dref);

		if (msl_options.is_macos() && (grad_x || grad_y))
		{
			// For sample compare, MSL does not support gradient2d for all targets (only iOS, apparently, according to the docs).
			// However, the most common case here is a constant gradient of 0, as that is the only way to express
			// LOD == 0 in GLSL with sampler2DArrayShadow (cascaded shadow mapping).
			// We will detect a compile-time constant 0 value for the gradient and promote that to level(0) in MSL.
			bool constant_zero_x = !grad_x || expression_is_constant_null(grad_x);
			bool constant_zero_y = !grad_y || expression_is_constant_null(grad_y);
			if (constant_zero_x && constant_zero_y)
			{
				lod = 0;
				grad_x = 0;
				grad_y = 0;
				farg_str += ", level(0)";
			}
			else
			{
				SPIRV_CROSS_THROW("Using non-constant 0.0 gradient() qualifier for sample_compare. This is not "
				                  "supported on macOS in MSL.");
			}
		}

		if (msl_options.is_macos() && bias)
		{
			// Bias is likewise not supported on macOS with sample_compare.
			// Verify it is compile-time zero, and drop the argument.
			if (expression_is_constant_null(bias))
			{
				bias = 0;
			}
			else
			{
				SPIRV_CROSS_THROW(
				    "Using non-constant 0.0 bias() qualifier for sample_compare. This is not supported on macOS in MSL.");
			}
		}
	}

	// LOD Options
	// Metal does not support LOD for 1D textures.
	if (bias && imgtype.image.dim != Dim1D)
	{
		forward = forward && should_forward(bias);
		farg_str += ", bias(" + to_expression(bias) + ")";
	}

	// Metal does not support LOD for 1D textures.
	if (lod && imgtype.image.dim != Dim1D)
	{
		forward = forward && should_forward(lod);
		if (is_fetch)
		{
			farg_str += ", " + to_expression(lod);
		}
		else
		{
			farg_str += ", level(" + to_expression(lod) + ")";
		}
	}
	else if (is_fetch && !lod && imgtype.image.dim != Dim1D && imgtype.image.dim != DimBuffer && !imgtype.image.ms &&
	         imgtype.image.sampled != 2)
	{
		// The LOD argument is optional in OpImageFetch, but we require a LOD value, so pick 0 as the default.
		// Check for the sampled type as well, because is_fetch is also used for OpImageRead in MSL.
		farg_str += ", 0";
	}

	// Metal does not support LOD for 1D textures.
	if ((grad_x || grad_y) && imgtype.image.dim != Dim1D)
	{
		forward = forward && should_forward(grad_x);
		forward = forward && should_forward(grad_y);
		string grad_opt;
		switch (imgtype.image.dim)
		{
		case Dim2D:
			grad_opt = "2d";
			break;
		case Dim3D:
			grad_opt = "3d";
			break;
		case DimCube:
			grad_opt = "cube";
			break;
		default:
			grad_opt = "unsupported_gradient_dimension";
			break;
		}
		farg_str += ", gradient" + grad_opt + "(" + to_expression(grad_x) + ", " + to_expression(grad_y) + ")";
	}

	if (minlod)
	{
		if (msl_options.is_macos())
		{
			if (!msl_options.supports_msl_version(2, 2))
				SPIRV_CROSS_THROW("min_lod_clamp() is only supported in MSL 2.2 and up on macOS.");
		}
		else if (msl_options.is_ios())
			SPIRV_CROSS_THROW("min_lod_clamp() is not supported on iOS.");

		forward = forward && should_forward(minlod);
		farg_str += ", min_lod_clamp(" + to_expression(minlod) + ")";
	}

	// Add offsets
	string offset_expr;
	if (coffset && !is_fetch)
	{
		forward = forward && should_forward(coffset);
		offset_expr = to_expression(coffset);
	}
	else if (offset && !is_fetch)
	{
		forward = forward && should_forward(offset);
		offset_expr = to_expression(offset);
	}

	if (!offset_expr.empty())
	{
		switch (imgtype.image.dim)
		{
		case Dim2D:
			if (coord_type.vecsize > 2)
				offset_expr = enclose_expression(offset_expr) + ".xy";

			farg_str += ", " + offset_expr;
			break;

		case Dim3D:
			if (coord_type.vecsize > 3)
				offset_expr = enclose_expression(offset_expr) + ".xyz";

			farg_str += ", " + offset_expr;
			break;

		default:
			break;
		}
	}

	if (comp)
	{
		// If a 2D gather has a component, ensure it also has an offset arg
		if (imgtype.image.dim == Dim2D && offset_expr.empty())
			farg_str += ", int2(0)";

		forward = forward && should_forward(comp);
		farg_str += ", " + to_component_argument(comp);
	}

	if (sample)
	{
		forward = forward && should_forward(sample);
		farg_str += ", ";
		farg_str += to_expression(sample);
	}

	if (msl_options.swizzle_texture_samples && is_sampled_image_type(imgtype))
	{
		// Add the swizzle constant from the swizzle buffer.
		if (!is_gather)
			farg_str += ")";
		farg_str += ", " + to_swizzle_expression(img);
		used_swizzle_buffer = true;
	}

	*p_forward = forward;

	return farg_str;
}

// If the texture coordinates are floating point, invokes MSL round() function to round them.
string CompilerMSL::round_fp_tex_coords(string tex_coords, bool coord_is_fp)
{
	return coord_is_fp ? ("round(" + tex_coords + ")") : tex_coords;
}

// Returns a string to use in an image sampling function argument.
// The ID must be a scalar constant.
string CompilerMSL::to_component_argument(uint32_t id)
{
	if (ir.ids[id].get_type() != TypeConstant)
	{
		SPIRV_CROSS_THROW("ID " + to_string(id) + " is not an OpConstant.");
		return "component::x";
	}

	uint32_t component_index = get<SPIRConstant>(id).scalar();
	switch (component_index)
	{
	case 0:
		return "component::x";
	case 1:
		return "component::y";
	case 2:
		return "component::z";
	case 3:
		return "component::w";

	default:
		SPIRV_CROSS_THROW("The value (" + to_string(component_index) + ") of OpConstant ID " + to_string(id) +
		                  " is not a valid Component index, which must be one of 0, 1, 2, or 3.");
		return "component::x";
	}
}

// Establish sampled image as expression object and assign the sampler to it.
void CompilerMSL::emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id)
{
	set<SPIRCombinedImageSampler>(result_id, result_type, image_id, samp_id);
}

// Returns a string representation of the ID, usable as a function arg.
// Manufacture automatic sampler arg for SampledImage texture.
string CompilerMSL::to_func_call_arg(uint32_t id)
{
	string arg_str;

	auto *c = maybe_get<SPIRConstant>(id);
	if (c && !get<SPIRType>(c->constant_type).array.empty())
	{
		// If we are passing a constant array directly to a function for some reason,
		// the callee will expect an argument in thread const address space
		// (since we can only bind to arrays with references in MSL).
		// To resolve this, we must emit a copy in this address space.
		// This kind of code gen should be rare enough that performance is not a real concern.
		// Inline the SPIR-V to avoid this kind of suboptimal codegen.
		//
		// We risk calling this inside a continue block (invalid code),
		// so just create a thread local copy in the current function.
		arg_str = join("_", id, "_array_copy");
		auto &constants = current_function->constant_arrays_needed_on_stack;
		auto itr = find(begin(constants), end(constants), id);
		if (itr == end(constants))
		{
			force_recompile();
			constants.push_back(id);
		}
	}
	else
		arg_str = CompilerGLSL::to_func_call_arg(id);

	// Manufacture automatic sampler arg if the arg is a SampledImage texture.
	auto &type = expression_type(id);
	if (type.basetype == SPIRType::SampledImage && type.image.dim != DimBuffer)
	{
		// Need to check the base variable in case we need to apply a qualified alias.
		uint32_t var_id = 0;
		auto *sampler_var = maybe_get<SPIRVariable>(id);
		if (sampler_var)
			var_id = sampler_var->basevariable;

		arg_str += ", " + to_sampler_expression(var_id ? var_id : id);
	}

	uint32_t var_id = 0;
	auto *var = maybe_get<SPIRVariable>(id);
	if (var)
		var_id = var->basevariable;

	if (msl_options.swizzle_texture_samples && has_sampled_images && is_sampled_image_type(type))
	{
		// Need to check the base variable in case we need to apply a qualified alias.
		arg_str += ", " + to_swizzle_expression(var_id ? var_id : id);
	}

	if (buffers_requiring_array_length.count(var_id))
		arg_str += ", " + to_buffer_size_expression(var_id ? var_id : id);

	return arg_str;
}

// If the ID represents a sampled image that has been assigned a sampler already,
// generate an expression for the sampler, otherwise generate a fake sampler name
// by appending a suffix to the expression constructed from the ID.
string CompilerMSL::to_sampler_expression(uint32_t id)
{
	auto *combined = maybe_get<SPIRCombinedImageSampler>(id);
	auto expr = to_expression(combined ? combined->image : id);
	auto index = expr.find_first_of('[');

	uint32_t samp_id = 0;
	if (combined)
		samp_id = combined->sampler;

	if (index == string::npos)
		return samp_id ? to_expression(samp_id) : expr + sampler_name_suffix;
	else
	{
		auto image_expr = expr.substr(0, index);
		auto array_expr = expr.substr(index);
		return samp_id ? to_expression(samp_id) : (image_expr + sampler_name_suffix + array_expr);
	}
}

string CompilerMSL::to_swizzle_expression(uint32_t id)
{
	auto *combined = maybe_get<SPIRCombinedImageSampler>(id);

	auto expr = to_expression(combined ? combined->image : id);
	auto index = expr.find_first_of('[');

	// If an image is part of an argument buffer, translate this to a legal identifier.
	for (auto &c : expr)
		if (c == '.')
			c = '_';

	if (index == string::npos)
		return expr + swizzle_name_suffix;
	else
	{
		auto image_expr = expr.substr(0, index);
		auto array_expr = expr.substr(index);
		return image_expr + swizzle_name_suffix + array_expr;
	}
}

string CompilerMSL::to_buffer_size_expression(uint32_t id)
{
	auto expr = to_expression(id);
	auto index = expr.find_first_of('[');

	// This is quite crude, but we need to translate the reference name (*spvDescriptorSetN.name) to
	// the pointer expression spvDescriptorSetN.name to make a reasonable expression here.
	// This only happens if we have argument buffers and we are using OpArrayLength on a lone SSBO in that set.
	if (expr.size() >= 3 && expr[0] == '(' && expr[1] == '*')
		expr = address_of_expression(expr);

	// If a buffer is part of an argument buffer, translate this to a legal identifier.
	for (auto &c : expr)
		if (c == '.')
			c = '_';
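	// For example, an expression like spvDescriptorSet0.mybuf (where "mybuf" is a hypothetical
	// buffer name) becomes spvDescriptorSet0_mybuf before buffer_size_name_suffix is appended below.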

	if (index == string::npos)
		return expr + buffer_size_name_suffix;
	else
	{
		auto buffer_expr = expr.substr(0, index);
		auto array_expr = expr.substr(index);
		return buffer_expr + buffer_size_name_suffix + array_expr;
	}
}

// Checks whether the type is a Block all of whose members have DecorationPatch.
bool CompilerMSL::is_patch_block(const SPIRType &type)
{
	if (!has_decoration(type.self, DecorationBlock))
		return false;

	for (uint32_t i = 0; i < type.member_types.size(); i++)
	{
		if (!has_member_decoration(type.self, i, DecorationPatch))
			return false;
	}

	return true;
}

// Checks whether the ID is a row_major matrix that requires conversion before use.
bool CompilerMSL::is_non_native_row_major_matrix(uint32_t id)
{
	// Natively supported row-major matrices do not need to be converted.
	if (backend.native_row_major_matrix)
		return false;

	// Non-matrix or column-major matrix types do not need to be converted.
	if (!has_decoration(id, DecorationRowMajor))
		return false;

	// Generate a function that will swap matrix elements from row-major to column-major.
	// Packed row-major matrices should just use the transpose() function.
	if (!has_extended_decoration(id, SPIRVCrossDecorationPacked))
	{
		const auto type = expression_type(id);
		add_convert_row_major_matrix_function(type.columns, type.vecsize);
	}

	return true;
}

// Checks whether the member is a row_major matrix that requires conversion before use.
bool CompilerMSL::member_is_non_native_row_major_matrix(const SPIRType &type, uint32_t index)
{
	// Natively supported row-major matrices do not need to be converted.
	if (backend.native_row_major_matrix)
		return false;

	// Non-matrix or column-major matrix types do not need to be converted.
	if (!has_member_decoration(type.self, index, DecorationRowMajor))
		return false;

	// Generate a function that will swap matrix elements from row-major to column-major.
	// Packed row-major matrices should just use the transpose() function.
	if (!has_extended_member_decoration(type.self, index, SPIRVCrossDecorationPacked))
	{
		const auto mbr_type = get<SPIRType>(type.member_types[index]);
		add_convert_row_major_matrix_function(mbr_type.columns, mbr_type.vecsize);
	}

	return true;
}

// Adds a function suitable for converting a non-square row-major matrix to a column-major matrix.
void CompilerMSL::add_convert_row_major_matrix_function(uint32_t cols, uint32_t rows)
{
	SPVFuncImpl spv_func;
	if (cols == rows) // Square matrix...just use transpose() function
		return;
	else if (cols == 2 && rows == 3)
		spv_func = SPVFuncImplRowMajor2x3;
	else if (cols == 2 && rows == 4)
		spv_func = SPVFuncImplRowMajor2x4;
	else if (cols == 3 && rows == 2)
		spv_func = SPVFuncImplRowMajor3x2;
	else if (cols == 3 && rows == 4)
		spv_func = SPVFuncImplRowMajor3x4;
	else if (cols == 4 && rows == 2)
		spv_func = SPVFuncImplRowMajor4x2;
	else if (cols == 4 && rows == 3)
		spv_func = SPVFuncImplRowMajor4x3;
	else
		SPIRV_CROSS_THROW("Could not convert row-major matrix.");

	auto rslt = spv_function_implementations.insert(spv_func);
	if (rslt.second)
	{
		suppress_missing_prototypes = true;
		force_recompile();
	}
}

// Wraps the expression string in a function call that converts the
// row_major matrix result of the expression to a column_major matrix.
string CompilerMSL::convert_row_major_matrix(string exp_str, const SPIRType &exp_type, bool is_packed)
{
	strip_enclosed_expression(exp_str);

	string func_name;

	// Square and packed matrices can just use transpose
	if (exp_type.columns == exp_type.vecsize || is_packed)
		func_name = "transpose";
	else
		func_name = string("spvConvertFromRowMajor") + to_string(exp_type.columns) + "x" + to_string(exp_type.vecsize);

	return join(func_name, "(", exp_str, ")");
}

// Called automatically at the end of the entry point function
void CompilerMSL::emit_fixup()
{
	if ((get_execution_model() == ExecutionModelVertex ||
	     get_execution_model() == ExecutionModelTessellationEvaluation) &&
	    stage_out_var_id && !qual_pos_var_name.empty() && !capture_output_to_buffer)
	{
		if (options.vertex.fixup_clipspace)
			statement(qual_pos_var_name, ".z = (", qual_pos_var_name, ".z + ", qual_pos_var_name,
			          ".w) * 0.5; // Adjust clip-space for Metal");

		if (options.vertex.flip_vert_y)
			statement(qual_pos_var_name, ".y = -(", qual_pos_var_name, ".y);", " // Invert Y-axis for Metal");
	}
}

// Return a string defining a structure member, with padding and packing.
string CompilerMSL::to_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index,
                                     const string &qualifier)
{
	auto &membertype = get<SPIRType>(member_type_id);

	// If this member requires padding to maintain alignment, emit a dummy padding member.
	MSLStructMemberKey key = get_struct_member_key(type.self, index);
	uint32_t pad_len = struct_member_padding[key];
	if (pad_len > 0)
		statement("char _m", index, "_pad", "[", to_string(pad_len), "];");

	// If this member is packed, mark it as such.
	string pack_pfx = "";

	const SPIRType *effective_membertype = &membertype;
	SPIRType override_type;

	uint32_t orig_id = 0;
	if (has_extended_member_decoration(type.self, index, SPIRVCrossDecorationInterfaceOrigID))
		orig_id = get_extended_member_decoration(type.self, index, SPIRVCrossDecorationInterfaceOrigID);

	if (member_is_packed_type(type, index))
	{
		// If we're packing a matrix, output an appropriate typedef
		if (membertype.basetype == SPIRType::Struct)
		{
			pack_pfx = "/* FIXME: A padded struct is needed here. If you see this message, file a bug! */ ";
		}
		else if (membertype.vecsize > 1 && membertype.columns > 1)
		{
			pack_pfx = "packed_";
			string base_type = membertype.width == 16 ? "half" : "float";
			string td_line = "typedef ";
			td_line += base_type + to_string(membertype.vecsize) + "x" + to_string(membertype.columns);
			td_line += " " + pack_pfx;
			td_line += base_type + to_string(membertype.columns) + "x" + to_string(membertype.vecsize);
			td_line += ";";
			add_typedef_line(td_line);
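			// e.g. for a packed 2-column, 3-row float matrix this emits:
			//   typedef float3x2 packed_float2x3;
			// aliasing the packed name to the transposed native MSL matrix type.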
		}
		else if (is_array(membertype) && membertype.vecsize <= 2 && membertype.basetype != SPIRType::Struct)
		{
			// A "packed" float array, but we pad it out to a 4-vector here instead.
			override_type = membertype;
			override_type.vecsize = 4;
			effective_membertype = &override_type;
		}
		else
			pack_pfx = "packed_";
	}

	// Very specifically, image load-store in argument buffers is disallowed in MSL on iOS.
	if (msl_options.is_ios() && membertype.basetype == SPIRType::Image && membertype.image.sampled == 2)
	{
		if (!has_decoration(orig_id, DecorationNonWritable))
			SPIRV_CROSS_THROW("Writable images are not allowed in argument buffers on iOS.");
	}

	// Array information is baked into these types.
	string array_type;
	if (membertype.basetype != SPIRType::Image && membertype.basetype != SPIRType::Sampler &&
	    membertype.basetype != SPIRType::SampledImage)
	{
		array_type = type_to_array_glsl(membertype);
	}

	return join(pack_pfx, type_to_glsl(*effective_membertype, orig_id), " ", qualifier, to_member_name(type, index),
	            member_attribute_qualifier(type, index), array_type, ";");
}

// Emit a structure member, padding and packing to maintain the correct member alignments.
void CompilerMSL::emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index,
                                     const string &qualifier, uint32_t)
{
	statement(to_struct_member(type, member_type_id, index, qualifier));
}

// Return an MSL qualifier for the specified function attribute member.
string CompilerMSL::member_attribute_qualifier(const SPIRType &type, uint32_t index)
{
	auto &execution = get_entry_point();

	uint32_t mbr_type_id = type.member_types[index];
	auto &mbr_type = get<SPIRType>(mbr_type_id);

	BuiltIn builtin = BuiltInMax;
	bool is_builtin = is_member_builtin(type, index, &builtin);

	if (has_extended_member_decoration(type.self, index, SPIRVCrossDecorationArgumentBufferID))
		return join(" [[id(", get_extended_member_decoration(type.self, index, SPIRVCrossDecorationArgumentBufferID),
		            ")]]");

	// Vertex function inputs
	if (execution.model == ExecutionModelVertex && type.storage == StorageClassInput)
	{
		if (is_builtin)
		{
			switch (builtin)
			{
			case BuiltInVertexId:
			case BuiltInVertexIndex:
			case BuiltInBaseVertex:
			case BuiltInInstanceId:
			case BuiltInInstanceIndex:
			case BuiltInBaseInstance:
				return string(" [[") + builtin_qualifier(builtin) + "]]";

			case BuiltInDrawIndex:
				SPIRV_CROSS_THROW("DrawIndex is not supported in MSL.");

			default:
				return "";
			}
		}
		uint32_t locn = get_ordered_member_location(type.self, index);
		if (locn != k_unknown_location)
			return string(" [[attribute(") + convert_to_string(locn) + ")]]";
	}

	// Vertex and tessellation evaluation function outputs
	if ((execution.model == ExecutionModelVertex || execution.model == ExecutionModelTessellationEvaluation) &&
	    type.storage == StorageClassOutput)
	{
		if (is_builtin)
		{
			switch (builtin)
			{
			case BuiltInPointSize:
				// Only mark the PointSize builtin if really rendering points.
				// Some shaders may include a PointSize builtin even when used to render
				// non-point topologies, and Metal will reject this builtin when compiling
				// the shader into a render pipeline that uses a non-point topology.
				return msl_options.enable_point_size_builtin ? (string(" [[") + builtin_qualifier(builtin) + "]]") : "";

			case BuiltInViewportIndex:
				if (!msl_options.supports_msl_version(2, 0))
					SPIRV_CROSS_THROW("ViewportIndex requires Metal 2.0.");
				/* fallthrough */
			case BuiltInPosition:
			case BuiltInLayer:
			case BuiltInClipDistance:
				return string(" [[") + builtin_qualifier(builtin) + "]]" + (mbr_type.array.empty() ? "" : " ");

			default:
				return "";
			}
		}
		uint32_t comp;
		uint32_t locn = get_ordered_member_location(type.self, index, &comp);
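		// Stage outputs use user(locn) attributes so they match fragment inputs by location:
		// e.g. location 1, component 2 becomes [[user(locn1_2)]].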
		if (locn != k_unknown_location)
		{
			if (comp != k_unknown_component)
				return string(" [[user(locn") + convert_to_string(locn) + "_" + convert_to_string(comp) + ")]]";
			else
				return string(" [[user(locn") + convert_to_string(locn) + ")]]";
		}
	}

	// Tessellation control function inputs
	if (execution.model == ExecutionModelTessellationControl && type.storage == StorageClassInput)
	{
		if (is_builtin)
		{
			switch (builtin)
			{
			case BuiltInInvocationId:
			case BuiltInPrimitiveId:
			case BuiltInSubgroupLocalInvocationId: // FIXME: Should work in any stage
			case BuiltInSubgroupSize: // FIXME: Should work in any stage
				return string(" [[") + builtin_qualifier(builtin) + "]]" + (mbr_type.array.empty() ? "" : " ");
			case BuiltInPatchVertices:
				return "";
			// Others come from stage input.
			default:
				break;
			}
		}
		uint32_t locn = get_ordered_member_location(type.self, index);
		if (locn != k_unknown_location)
			return string(" [[attribute(") + convert_to_string(locn) + ")]]";
	}

	// Tessellation control function outputs
	if (execution.model == ExecutionModelTessellationControl && type.storage == StorageClassOutput)
	{
		// For this type of shader, we always arrange for it to capture its
		// output to a buffer. For this reason, qualifiers are irrelevant here.
		return "";
	}

	// Tessellation evaluation function inputs
	if (execution.model == ExecutionModelTessellationEvaluation && type.storage == StorageClassInput)
	{
		if (is_builtin)
		{
			switch (builtin)
			{
			case BuiltInPrimitiveId:
			case BuiltInTessCoord:
				return string(" [[") + builtin_qualifier(builtin) + "]]";
			case BuiltInPatchVertices:
				return "";
			// Others come from stage input.
			default:
				break;
			}
		}
		// The special control point array must not be marked with an attribute.
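		// (In MSL this is the patch_control_point<T> member embedded in the patch-level
		// stage input, so it carries no per-location attribute of its own.)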
		if (get_type(type.member_types[index]).basetype == SPIRType::ControlPointArray)
			return "";
		uint32_t locn = get_ordered_member_location(type.self, index);
		if (locn != k_unknown_location)
			return string(" [[attribute(") + convert_to_string(locn) + ")]]";
	}

	// Tessellation evaluation function outputs were handled above.

	// Fragment function inputs
	if (execution.model == ExecutionModelFragment && type.storage == StorageClassInput)
	{
		string quals;
		if (is_builtin)
		{
			switch (builtin)
			{
			case BuiltInFrontFacing:
			case BuiltInPointCoord:
			case BuiltInFragCoord:
			case BuiltInSampleId:
			case BuiltInSampleMask:
			case BuiltInLayer:
			case BuiltInBaryCoordNV:
			case BuiltInBaryCoordNoPerspNV:
				quals = builtin_qualifier(builtin);
				break;

			default:
				break;
			}
		}
		else
		{
			uint32_t comp;
			uint32_t locn = get_ordered_member_location(type.self, index, &comp);
			if (locn != k_unknown_location)
			{
				if (comp != k_unknown_component)
					quals = string("user(locn") + convert_to_string(locn) + "_" + convert_to_string(comp) + ")";
				else
					quals = string("user(locn") + convert_to_string(locn) + ")";
			}
		}

		if (builtin == BuiltInBaryCoordNV || builtin == BuiltInBaryCoordNoPerspNV)
		{
			if (has_member_decoration(type.self, index, DecorationFlat) ||
			    has_member_decoration(type.self, index, DecorationCentroid) ||
			    has_member_decoration(type.self, index, DecorationSample) ||
			    has_member_decoration(type.self, index, DecorationNoPerspective))
			{
				// NoPerspective is baked into the builtin type.
				SPIRV_CROSS_THROW(
				    "Flat, Centroid, Sample, NoPerspective decorations are not supported for BaryCoord inputs.");
			}
		}

		// Don't bother decorating integers with the 'flat' attribute; it's
		// the default (in fact, the only option). Also don't bother with the
		// FragCoord builtin; it's always noperspective on Metal.
		if (!type_is_integral(mbr_type) && (!is_builtin || builtin != BuiltInFragCoord))
		{
			if (has_member_decoration(type.self, index, DecorationFlat))
			{
				if (!quals.empty())
					quals += ", ";
				quals += "flat";
			}
			else if (has_member_decoration(type.self, index, DecorationCentroid))
			{
				if (!quals.empty())
					quals += ", ";
				if (has_member_decoration(type.self, index, DecorationNoPerspective))
					quals += "centroid_no_perspective";
				else
					quals += "centroid_perspective";
			}
			else if (has_member_decoration(type.self, index, DecorationSample))
			{
				if (!quals.empty())
					quals += ", ";
				if (has_member_decoration(type.self, index, DecorationNoPerspective))
					quals += "sample_no_perspective";
				else
					quals += "sample_perspective";
			}
			else if (has_member_decoration(type.self, index, DecorationNoPerspective))
			{
				if (!quals.empty())
					quals += ", ";
				quals += "center_no_perspective";
			}
		}
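		// e.g. a centroid, noperspective input at location 3 ends up as
		// [[user(locn3), centroid_no_perspective]].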

		if (!quals.empty())
			return " [[" + quals + "]]";
	}

	// Fragment function outputs
	if (execution.model == ExecutionModelFragment && type.storage == StorageClassOutput)
	{
		if (is_builtin)
		{
			switch (builtin)
			{
			case BuiltInFragStencilRefEXT:
				if (!msl_options.supports_msl_version(2, 1))
					SPIRV_CROSS_THROW("Stencil export only supported in MSL 2.1 and up.");
				return string(" [[") + builtin_qualifier(builtin) + "]]";

			case BuiltInSampleMask:
			case BuiltInFragDepth:
				return string(" [[") + builtin_qualifier(builtin) + "]]";

			default:
				return "";
			}
		}
		uint32_t locn = get_ordered_member_location(type.self, index);
		if (locn != k_unknown_location && has_member_decoration(type.self, index, DecorationIndex))
			return join(" [[color(", locn, "), index(", get_member_decoration(type.self, index, DecorationIndex),
			            ")]]");
		else if (locn != k_unknown_location)
			return join(" [[color(", locn, ")]]");
		else if (has_member_decoration(type.self, index, DecorationIndex))
			return join(" [[index(", get_member_decoration(type.self, index, DecorationIndex), ")]]");
		else
			return "";
	}

	// Compute function inputs
	if (execution.model == ExecutionModelGLCompute && type.storage == StorageClassInput)
	{
		if (is_builtin)
		{
			switch (builtin)
			{
			case BuiltInGlobalInvocationId:
			case BuiltInWorkgroupId:
			case BuiltInNumWorkgroups:
			case BuiltInLocalInvocationId:
			case BuiltInLocalInvocationIndex:
			case BuiltInNumSubgroups:
			case BuiltInSubgroupId:
			case BuiltInSubgroupLocalInvocationId: // FIXME: Should work in any stage
			case BuiltInSubgroupSize: // FIXME: Should work in any stage
				return string(" [[") + builtin_qualifier(builtin) + "]]";

			default:
				return "";
			}
		}
	}

	return "";
}

// Returns the location decoration of the member with the specified index in the specified type.
// If the location of the member has been explicitly set, that location is used. If not, this
// function assumes the members are ordered in their location order, and simply returns the
// index as the location.
uint32_t CompilerMSL::get_ordered_member_location(uint32_t type_id, uint32_t index, uint32_t *comp)
{
	auto &m = ir.meta[type_id];
	if (index < m.members.size())
	{
		auto &dec = m.members[index];
		if (comp)
		{
			if (dec.decoration_flags.get(DecorationComponent))
				*comp = dec.component;
			else
				*comp = k_unknown_component;
		}
		if (dec.decoration_flags.get(DecorationLocation))
			return dec.location;
	}

	return index;
}

// Returns the type declaration for a function, including the
// entry type if the current function is the entry point function
string CompilerMSL::func_type_decl(SPIRType &type)
{
	// The regular function return type. If not processing the entry point function, that's all we need.
	string return_type = type_to_glsl(type) + type_to_array_glsl(type);
	if (!processing_entry_point)
		return return_type;

	// If an outgoing interface block has been defined, and it should be returned, override the entry point return type.
	bool ep_should_return_output = !get_is_rasterization_disabled();
	if (stage_out_var_id && ep_should_return_output)
		return_type = type_to_glsl(get_stage_out_struct_type()) + type_to_array_glsl(type);

	// Prepend an entry type, based on the execution model.
	string entry_type;
	auto &execution = get_entry_point();
	switch (execution.model)
	{
	case ExecutionModelVertex:
		entry_type = "vertex";
		break;
	case ExecutionModelTessellationEvaluation:
		if (!msl_options.supports_msl_version(1, 2))
			SPIRV_CROSS_THROW("Tessellation requires Metal 1.2.");
		if (execution.flags.get(ExecutionModeIsolines))
			SPIRV_CROSS_THROW("Metal does not support isoline tessellation.");
		if (msl_options.is_ios())
			entry_type =
			    join("[[ patch(", execution.flags.get(ExecutionModeTriangles) ? "triangle" : "quad", ") ]] vertex");
		else
			entry_type = join("[[ patch(", execution.flags.get(ExecutionModeTriangles) ? "triangle" : "quad", ", ",
			                  execution.output_vertices, ") ]] vertex");
		break;
	case ExecutionModelFragment:
		entry_type =
		    execution.flags.get(ExecutionModeEarlyFragmentTests) ? "[[ early_fragment_tests ]] fragment" : "fragment";
		break;
	case ExecutionModelTessellationControl:
		if (!msl_options.supports_msl_version(1, 2))
			SPIRV_CROSS_THROW("Tessellation requires Metal 1.2.");
		if (execution.flags.get(ExecutionModeIsolines))
			SPIRV_CROSS_THROW("Metal does not support isoline tessellation.");
		/* fallthrough */
	case ExecutionModelGLCompute:
	case ExecutionModelKernel:
		entry_type = "kernel";
		break;
	default:
		entry_type = "unknown";
		break;
	}

	return entry_type + " " + return_type;
}
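// For illustration: combined with the return type, func_type_decl() yields entry
// point declarations such as the following (struct and function names here are
// hypothetical; they are derived elsewhere from the module's entry point name):
//
//   vertex main0_out main0(...)
//   fragment main0_out main0(...)
//   [[ patch(triangle, 4) ]] vertex main0_out main0(...)  // post-tessellation vertex on macOS
//   kernel void main0(...)                                 // compute / tessellation control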

// In MSL, address space qualifiers are required for all pointer or reference variables
string CompilerMSL::get_argument_address_space(const SPIRVariable &argument)
{
	const auto &type = get<SPIRType>(argument.basetype);

	switch (type.storage)
	{
	case StorageClassWorkgroup:
		return "threadgroup";

	case StorageClassStorageBuffer:
	{
		// For arguments from variable pointers, we use the write count deduction, so
		// we should not assume any constness here. Only for global SSBOs.
		bool readonly = false;
		if (has_decoration(type.self, DecorationBlock))
			readonly = ir.get_buffer_block_flags(argument).get(DecorationNonWritable);

		return readonly ? "const device" : "device";
	}

	case StorageClassUniform:
	case StorageClassUniformConstant:
	case StorageClassPushConstant:
		if (type.basetype == SPIRType::Struct)
		{
			bool ssbo = has_decoration(type.self, DecorationBufferBlock);
			if (ssbo)
			{
				bool readonly = ir.get_buffer_block_flags(argument).get(DecorationNonWritable);
				return readonly ? "const device" : "device";
			}
			else
				return "constant";
		}
		break;

	case StorageClassFunction:
	case StorageClassGeneric:
		// No address space for plain values.
		return type.pointer ? "thread" : "";

	case StorageClassInput:
		if (get_execution_model() == ExecutionModelTessellationControl && argument.basevariable == stage_in_ptr_var_id)
			return "threadgroup";
		break;

	case StorageClassOutput:
		if (capture_output_to_buffer)
			return "device";
		break;

	default:
		break;
	}

	return "thread";
}
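// Rough summary of the mapping above, for reference only; the switch in
// get_argument_address_space() is the authoritative logic:
//
//   Workgroup                         -> threadgroup
//   StorageBuffer / SSBO blocks       -> device (const device when NonWritable)
//   Uniform / PushConstant struct     -> constant
//   Function / Generic plain values   -> thread (no qualifier for non-pointers)
//   Everything else                   -> thread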

string CompilerMSL::get_type_address_space(const SPIRType &type, uint32_t id)
{
	switch (type.storage)
	{
	case StorageClassWorkgroup:
		return "threadgroup";

	case StorageClassStorageBuffer:
	{
		// This can be called for variable pointer contexts as well, so be very careful about which method we choose.
		Bitset flags;
		if (ir.ids[id].get_type() == TypeVariable && has_decoration(type.self, DecorationBlock))
			flags = get_buffer_block_flags(id);
		else
			flags = get_decoration_bitset(id);

		return flags.get(DecorationNonWritable) ? "const device" : "device";
	}

	case StorageClassUniform:
	case StorageClassUniformConstant:
	case StorageClassPushConstant:
		if (type.basetype == SPIRType::Struct)
		{
			bool ssbo = has_decoration(type.self, DecorationBufferBlock);
			if (ssbo)
			{
				// This can be called for variable pointer contexts as well, so be very careful about which method we choose.
				Bitset flags;
				if (ir.ids[id].get_type() == TypeVariable && has_decoration(type.self, DecorationBlock))
					flags = get_buffer_block_flags(id);
				else
					flags = get_decoration_bitset(id);

				return flags.get(DecorationNonWritable) ? "const device" : "device";
			}
			else
				return "constant";
		}
		else
			return "constant";

	case StorageClassFunction:
	case StorageClassGeneric:
		// No address space for plain values.
		return type.pointer ? "thread" : "";

	case StorageClassOutput:
		if (capture_output_to_buffer)
			return "device";
		break;

	default:
		break;
	}

	return "thread";
}

string CompilerMSL::entry_point_arg_stage_in()
{
	string decl;

	// Stage-in structure
	uint32_t stage_in_id;
	if (get_execution_model() == ExecutionModelTessellationEvaluation)
		stage_in_id = patch_stage_in_var_id;
	else
		stage_in_id = stage_in_var_id;

	if (stage_in_id)
	{
		auto &var = get<SPIRVariable>(stage_in_id);
		auto &type = get_variable_data_type(var);

		add_resource_name(var.self);
		decl = join(type_to_glsl(type), " ", to_name(var.self), " [[stage_in]]");
	}

	return decl;
}
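// For illustration: for a vertex shader with a stage-in interface block, this
// returns something like "main0_in in [[stage_in]]", where "main0_in" and "in"
// are names generated for the current module (shown here as hypothetical values).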

void CompilerMSL::entry_point_args_builtin(string &ep_args)
{
	// Builtin variables
	ir.for_each_typed_id<SPIRVariable>([&](uint32_t var_id, SPIRVariable &var) {
		auto bi_type = BuiltIn(get_decoration(var_id, DecorationBuiltIn));

		// Don't emit SamplePosition as a separate parameter. In the entry
		// point, we get that by calling get_sample_position() on the sample ID.
		if (var.storage == StorageClassInput && is_builtin_variable(var) &&
		    get_variable_data_type(var).basetype != SPIRType::Struct &&
		    get_variable_data_type(var).basetype != SPIRType::ControlPointArray)
		{
			// If the builtin is not part of the active input builtin set, don't emit it.
			// Relevant for multiple entry-point modules which might declare unused builtins.
			if (!active_input_builtins.get(bi_type) || !interface_variable_exists_in_entry_point(var_id))
				return;

			// These builtins are emitted specially. If we pass this branch, the builtin directly matches
			// an MSL builtin.
			if (bi_type != BuiltInSamplePosition && bi_type != BuiltInHelperInvocation &&
			    bi_type != BuiltInPatchVertices && bi_type != BuiltInTessLevelInner &&
			    bi_type != BuiltInTessLevelOuter && bi_type != BuiltInPosition && bi_type != BuiltInPointSize &&
			    bi_type != BuiltInClipDistance && bi_type != BuiltInCullDistance && bi_type != BuiltInSubgroupEqMask &&
			    bi_type != BuiltInBaryCoordNV && bi_type != BuiltInBaryCoordNoPerspNV &&
			    bi_type != BuiltInSubgroupGeMask && bi_type != BuiltInSubgroupGtMask &&
			    bi_type != BuiltInSubgroupLeMask && bi_type != BuiltInSubgroupLtMask)
			{
				if (!ep_args.empty())
					ep_args += ", ";

				ep_args += builtin_type_decl(bi_type, var_id) + " " + to_expression(var_id);
				ep_args += " [[" + builtin_qualifier(bi_type) + "]]";
			}
		}
	});

	// Vertex and instance index built-ins
	if (needs_vertex_idx_arg)
		ep_args += built_in_func_arg(BuiltInVertexIndex, !ep_args.empty());

	if (needs_instance_idx_arg)
		ep_args += built_in_func_arg(BuiltInInstanceIndex, !ep_args.empty());

	if (capture_output_to_buffer)
	{
		// Add parameters to hold the indirect draw parameters and the shader output. This has to be handled
		// specially because it needs to be a pointer, not a reference.
		if (stage_out_var_id)
		{
			if (!ep_args.empty())
				ep_args += ", ";
			ep_args += join("device ", type_to_glsl(get_stage_out_struct_type()), "* ", output_buffer_var_name,
			                " [[buffer(", msl_options.shader_output_buffer_index, ")]]");
		}

		if (get_execution_model() == ExecutionModelTessellationControl)
		{
			if (!ep_args.empty())
				ep_args += ", ";
			ep_args +=
			    join("constant uint* spvIndirectParams [[buffer(", msl_options.indirect_params_buffer_index, ")]]");
		}
		else if (stage_out_var_id)
		{
			if (!ep_args.empty())
				ep_args += ", ";
			ep_args +=
			    join("device uint* spvIndirectParams [[buffer(", msl_options.indirect_params_buffer_index, ")]]");
		}

		// Tessellation control shaders get three additional parameters:
		// a buffer to hold the per-patch data, a buffer to hold the per-patch
		// tessellation levels, and a block of workgroup memory to hold the
		// input control point data.
		if (get_execution_model() == ExecutionModelTessellationControl)
		{
			if (patch_stage_out_var_id)
			{
				if (!ep_args.empty())
					ep_args += ", ";
				ep_args +=
				    join("device ", type_to_glsl(get_patch_stage_out_struct_type()), "* ", patch_output_buffer_var_name,
				         " [[buffer(", convert_to_string(msl_options.shader_patch_output_buffer_index), ")]]");
			}
			if (!ep_args.empty())
				ep_args += ", ";
			ep_args += join("device ", get_tess_factor_struct_name(), "* ", tess_factor_buffer_var_name, " [[buffer(",
			                convert_to_string(msl_options.shader_tess_factor_buffer_index), ")]]");
			if (stage_in_var_id)
			{
				if (!ep_args.empty())
					ep_args += ", ";
				ep_args += join("threadgroup ", type_to_glsl(get_stage_in_struct_type()), "* ", input_wg_var_name,
				                " [[threadgroup(", convert_to_string(msl_options.shader_input_wg_index), ")]]");
			}
		}
	}
}
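// For illustration: a typical vertex shader picks up builtin arguments such as
// "uint gl_VertexIndex [[vertex_id]], uint gl_InstanceIndex [[instance_id]]",
// while a tessellation control shader additionally receives the indirect-params,
// patch-output, tess-factor and threadgroup input parameters appended above.
// The names shown are representative, not fixed.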

string CompilerMSL::entry_point_args_argument_buffer(bool append_comma)
{
	string ep_args = entry_point_arg_stage_in();

	for (uint32_t i = 0; i < kMaxArgumentBuffers; i++)
	{
		uint32_t id = argument_buffer_ids[i];
		if (id == 0)
			continue;

		add_resource_name(id);
		auto &var = get<SPIRVariable>(id);
		auto &type = get_variable_data_type(var);

		if (!ep_args.empty())
			ep_args += ", ";

		ep_args += get_argument_address_space(var) + " " + type_to_glsl(type) + "& " + to_name(id);
		ep_args += " [[buffer(" + convert_to_string(i) + ")]]";

		// Makes it more practical for testing, since the push constant block can occupy the first available
		// buffer slot if it's not bound explicitly.
		next_metal_resource_index_buffer = i + 1;
	}

	entry_point_args_discrete_descriptors(ep_args);
	entry_point_args_builtin(ep_args);

	if (!ep_args.empty() && append_comma)
		ep_args += ", ";

	return ep_args;
}
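// For illustration: each argument buffer becomes a single reference parameter,
// e.g. "device spvDescriptorSet0& spvDescriptorSet0 [[buffer(0)]]" for set 0.
// The actual name comes from to_name() on the argument buffer variable, so the
// exact spelling depends on the module.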

const MSLConstexprSampler *CompilerMSL::find_constexpr_sampler(uint32_t id) const
{
	// Try by ID.
	{
		auto itr = constexpr_samplers_by_id.find(id);
		if (itr != end(constexpr_samplers_by_id))
			return &itr->second;
	}

	// Try by binding.
	{
		uint32_t desc_set = get_decoration(id, DecorationDescriptorSet);
		uint32_t binding = get_decoration(id, DecorationBinding);

		auto itr = constexpr_samplers_by_binding.find({ desc_set, binding });
		if (itr != end(constexpr_samplers_by_binding))
			return &itr->second;
	}

	return nullptr;
}

void CompilerMSL::entry_point_args_discrete_descriptors(string &ep_args)
{
	// Output resources, sorted by resource index & type
	// We need to sort to work around a bug on macOS 10.13 with NVidia drivers where switching between shaders
	// with different order of buffers can result in issues with buffer assignments inside the driver.
	struct Resource
	{
		SPIRVariable *var;
		string name;
		SPIRType::BaseType basetype;
		uint32_t index;
	};

	SmallVector<Resource> resources;

	ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
		if ((var.storage == StorageClassUniform || var.storage == StorageClassUniformConstant ||
		     var.storage == StorageClassPushConstant || var.storage == StorageClassStorageBuffer) &&
		    !is_hidden_variable(var))
		{
			auto &type = get_variable_data_type(var);
			uint32_t var_id = var.self;

			if (var.storage != StorageClassPushConstant)
			{
				uint32_t desc_set = get_decoration(var_id, DecorationDescriptorSet);
				if (descriptor_set_is_argument_buffer(desc_set))
					return;
			}

			const MSLConstexprSampler *constexpr_sampler = nullptr;
			if (type.basetype == SPIRType::SampledImage || type.basetype == SPIRType::Sampler)
			{
				constexpr_sampler = find_constexpr_sampler(var_id);
				if (constexpr_sampler)
				{
					// Mark this ID as a constexpr sampler for later in case it came from set/bindings.
					constexpr_samplers_by_id[var_id] = *constexpr_sampler;
				}
			}

			if (type.basetype == SPIRType::SampledImage)
			{
				add_resource_name(var_id);
				resources.push_back(
				    { &var, to_name(var_id), SPIRType::Image, get_metal_resource_index(var, SPIRType::Image) });

				if (type.image.dim != DimBuffer && !constexpr_sampler)
				{
					resources.push_back({ &var, to_sampler_expression(var_id), SPIRType::Sampler,
					                      get_metal_resource_index(var, SPIRType::Sampler) });
				}
			}
			else if (!constexpr_sampler)
			{
				// constexpr samplers are not declared as resources.
				add_resource_name(var_id);
				resources.push_back(
				    { &var, to_name(var_id), type.basetype, get_metal_resource_index(var, type.basetype) });
			}
		}
	});

	sort(resources.begin(), resources.end(), [](const Resource &lhs, const Resource &rhs) {
		return tie(lhs.basetype, lhs.index) < tie(rhs.basetype, rhs.index);
	});

	for (auto &r : resources)
	{
		auto &var = *r.var;
		auto &type = get_variable_data_type(var);

		uint32_t var_id = var.self;

		switch (r.basetype)
		{
		case SPIRType::Struct:
		{
			auto &m = ir.meta[type.self];
			if (m.members.size() == 0)
				break;
			if (!type.array.empty())
			{
				if (type.array.size() > 1)
					SPIRV_CROSS_THROW("Arrays of arrays of buffers are not supported.");

				// Metal doesn't directly support this, so we must expand the
				// array. We'll declare a local array to hold these elements
				// later.
				uint32_t array_size = to_array_size_literal(type);

				if (array_size == 0)
					SPIRV_CROSS_THROW("Unsized arrays of buffers are not supported in MSL.");

				buffer_arrays.push_back(var_id);
				for (uint32_t i = 0; i < array_size; ++i)
				{
					if (!ep_args.empty())
						ep_args += ", ";
					ep_args += get_argument_address_space(var) + " " + type_to_glsl(type) + "* " + r.name + "_" +
					           convert_to_string(i);
					ep_args += " [[buffer(" + convert_to_string(r.index + i) + ")]]";
				}
			}
			else
			{
				if (!ep_args.empty())
					ep_args += ", ";
				ep_args += get_argument_address_space(var) + " " + type_to_glsl(type) + "& " + r.name;
				ep_args += " [[buffer(" + convert_to_string(r.index) + ")]]";
			}
			break;
		}
		case SPIRType::Sampler:
			if (!ep_args.empty())
				ep_args += ", ";
			ep_args += sampler_type(type) + " " + r.name;
			ep_args += " [[sampler(" + convert_to_string(r.index) + ")]]";
			break;
		case SPIRType::Image:
			if (!ep_args.empty())
				ep_args += ", ";
			ep_args += image_type_glsl(type, var_id) + " " + r.name;
			ep_args += " [[texture(" + convert_to_string(r.index) + ")]]";
			break;
		default:
			if (!ep_args.empty())
				ep_args += ", ";
			ep_args += type_to_glsl(type, var_id) + " " + r.name;
			ep_args += " [[buffer(" + convert_to_string(r.index) + ")]]";
			break;
		}
	}
}
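// For illustration: for a shader with one UBO, one combined image/sampler and
// nothing else, the discrete path appends arguments roughly like
// "constant UBO& ubo [[buffer(0)]], texture2d<float> tex [[texture(0)]],
// sampler texSmplr [[sampler(0)]]". Names are hypothetical; ordering follows
// the sort above (by base type, then resource index).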

// Returns a string containing a comma-delimited list of args for the entry point function
// This is the "classic" method of MSL 1 when we don't have argument buffer support.
string CompilerMSL::entry_point_args_classic(bool append_comma)
{
	string ep_args = entry_point_arg_stage_in();
	entry_point_args_discrete_descriptors(ep_args);
	entry_point_args_builtin(ep_args);

	if (!ep_args.empty() && append_comma)
		ep_args += ", ";

	return ep_args;
}

void CompilerMSL::fix_up_shader_inputs_outputs()
{
	// Look for sampled images and buffers. Add hooks to set up the swizzle constants or array lengths.
	ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
		auto &type = get_variable_data_type(var);
		uint32_t var_id = var.self;
		bool ssbo = has_decoration(type.self, DecorationBufferBlock);

		if (var.storage == StorageClassUniformConstant && !is_hidden_variable(var))
		{
			if (msl_options.swizzle_texture_samples && has_sampled_images && is_sampled_image_type(type))
			{
				auto &entry_func = this->get<SPIRFunction>(ir.default_entry_point);
				entry_func.fixup_hooks_in.push_back([this, &type, &var, var_id]() {
					bool is_array_type = !type.array.empty();

					uint32_t desc_set = get_decoration(var_id, DecorationDescriptorSet);
					if (descriptor_set_is_argument_buffer(desc_set))
					{
						statement("constant uint", is_array_type ? "* " : "& ", to_swizzle_expression(var_id),
						          is_array_type ? " = &" : " = ", to_name(argument_buffer_ids[desc_set]),
						          ".spvSwizzleConstants", "[",
						          convert_to_string(get_metal_resource_index(var, SPIRType::Image)), "];");
					}
					else
					{
						// If we have an array of images, we need to be able to index into it, so take a pointer instead.
						statement("constant uint", is_array_type ? "* " : "& ", to_swizzle_expression(var_id),
						          is_array_type ? " = &" : " = ", to_name(swizzle_buffer_id), "[",
						          convert_to_string(get_metal_resource_index(var, SPIRType::Image)), "];");
					}
				});
			}
		}
		else if ((var.storage == StorageClassStorageBuffer || (var.storage == StorageClassUniform && ssbo)) &&
		         !is_hidden_variable(var))
		{
			if (buffers_requiring_array_length.count(var.self))
			{
				auto &entry_func = this->get<SPIRFunction>(ir.default_entry_point);
				entry_func.fixup_hooks_in.push_back([this, &type, &var, var_id]() {
					bool is_array_type = !type.array.empty();

					uint32_t desc_set = get_decoration(var_id, DecorationDescriptorSet);
					if (descriptor_set_is_argument_buffer(desc_set))
					{
						statement("constant uint", is_array_type ? "* " : "& ", to_buffer_size_expression(var_id),
						          is_array_type ? " = &" : " = ", to_name(argument_buffer_ids[desc_set]),
						          ".spvBufferSizeConstants", "[",
						          convert_to_string(get_metal_resource_index(var, SPIRType::Image)), "];");
					}
					else
					{
						// If we have an array of buffers, we need to be able to index into it, so take a pointer instead.
						statement("constant uint", is_array_type ? "* " : "& ", to_buffer_size_expression(var_id),
						          is_array_type ? " = &" : " = ", to_name(buffer_size_buffer_id), "[",
						          convert_to_string(get_metal_resource_index(var, type.basetype)), "];");
					}
				});
			}
		}
	});

	// Builtin variables
	ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
		uint32_t var_id = var.self;
		BuiltIn bi_type = ir.meta[var_id].decoration.builtin_type;

		if (var.storage == StorageClassInput && is_builtin_variable(var))
		{
			auto &entry_func = this->get<SPIRFunction>(ir.default_entry_point);
			switch (bi_type)
			{
			case BuiltInSamplePosition:
				entry_func.fixup_hooks_in.push_back([=]() {
					statement(builtin_type_decl(bi_type), " ", to_expression(var_id), " = get_sample_position(",
					          to_expression(builtin_sample_id_id), ");");
				});
				break;
			case BuiltInHelperInvocation:
				if (msl_options.is_ios())
					SPIRV_CROSS_THROW("simd_is_helper_thread() is only supported on macOS.");
				else if (msl_options.is_macos() && !msl_options.supports_msl_version(2, 1))
					SPIRV_CROSS_THROW("simd_is_helper_thread() requires version 2.1 on macOS.");

				entry_func.fixup_hooks_in.push_back([=]() {
					statement(builtin_type_decl(bi_type), " ", to_expression(var_id), " = simd_is_helper_thread();");
				});
				break;
			case BuiltInPatchVertices:
				if (get_execution_model() == ExecutionModelTessellationEvaluation)
					entry_func.fixup_hooks_in.push_back([=]() {
						statement(builtin_type_decl(bi_type), " ", to_expression(var_id), " = ",
						          to_expression(patch_stage_in_var_id), ".gl_in.size();");
					});
				else
					entry_func.fixup_hooks_in.push_back([=]() {
						statement(builtin_type_decl(bi_type), " ", to_expression(var_id), " = spvIndirectParams[0];");
					});
				break;
			case BuiltInTessCoord:
				// Emit a fixup to account for the shifted domain. Don't do this for triangles;
				// MoltenVK will just reverse the winding order instead.
				if (msl_options.tess_domain_origin_lower_left && !get_entry_point().flags.get(ExecutionModeTriangles))
				{
					string tc = to_expression(var_id);
					entry_func.fixup_hooks_in.push_back([=]() { statement(tc, ".y = 1.0 - ", tc, ".y;"); });
				}
				break;
			case BuiltInSubgroupEqMask:
				if (msl_options.is_ios())
					SPIRV_CROSS_THROW("Subgroup ballot functionality is unavailable on iOS.");
				if (!msl_options.supports_msl_version(2, 1))
					SPIRV_CROSS_THROW("Subgroup ballot functionality requires Metal 2.1.");
				entry_func.fixup_hooks_in.push_back([=]() {
					statement(builtin_type_decl(bi_type), " ", to_expression(var_id), " = ",
					          to_expression(builtin_subgroup_invocation_id_id), " >= 32 ? uint4(0, (1 << (",
					          to_expression(builtin_subgroup_invocation_id_id), " - 32)), uint2(0)) : uint4(1 << ",
					          to_expression(builtin_subgroup_invocation_id_id), ", uint3(0));");
				});
				break;
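			// Worked example for the EqMask fixup above (values illustrative):
			// invocation 3  -> uint4(1 << 3, 0, 0, 0)         = uint4(0x00000008, 0, 0, 0)
			// invocation 35 -> uint4(0, 1 << (35 - 32), 0, 0) = uint4(0, 0x00000008, 0, 0)
			// Only the low two components are ever non-zero, since the masks here
			// assume subgroups of at most 64 threads.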
			case BuiltInSubgroupGeMask:
				if (msl_options.is_ios())
					SPIRV_CROSS_THROW("Subgroup ballot functionality is unavailable on iOS.");
				if (!msl_options.supports_msl_version(2, 1))
					SPIRV_CROSS_THROW("Subgroup ballot functionality requires Metal 2.1.");
				entry_func.fixup_hooks_in.push_back([=]() {
					// Case where index < 32, size < 32:
					// mask0 = bfe(0xFFFFFFFF, index, size - index);
					// mask1 = bfe(0xFFFFFFFF, 0, 0); // Gives 0
					// Case where index < 32 but size >= 32:
					// mask0 = bfe(0xFFFFFFFF, index, 32 - index);
					// mask1 = bfe(0xFFFFFFFF, 0, size - 32);
					// Case where index >= 32:
					// mask0 = bfe(0xFFFFFFFF, 32, 0); // Gives 0
					// mask1 = bfe(0xFFFFFFFF, index - 32, size - index);
					// This is expressed without branches to avoid divergent
					// control flow--hence the complicated min/max expressions.
					// This is further complicated by the fact that if you attempt
					// to bfe out-of-bounds on Metal, undefined behavior is the
					// result.
					statement(builtin_type_decl(bi_type), " ", to_expression(var_id),
					          " = uint4(extract_bits(0xFFFFFFFF, min(",
					          to_expression(builtin_subgroup_invocation_id_id), ", 32u), (uint)max(min((int)",
					          to_expression(builtin_subgroup_size_id), ", 32) - (int)",
					          to_expression(builtin_subgroup_invocation_id_id),
					          ", 0)), extract_bits(0xFFFFFFFF, (uint)max((int)",
					          to_expression(builtin_subgroup_invocation_id_id), " - 32, 0), (uint)max((int)",
					          to_expression(builtin_subgroup_size_id), " - (int)max(",
					          to_expression(builtin_subgroup_invocation_id_id), ", 32u), 0)), uint2(0));");
				});
				break;
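			// Worked example for the GeMask expression above: with invocation 5 and
			// subgroup size 40, the first word is extract_bits(0xFFFFFFFF, 5, 27) =
			// 0xFFFFFFE0 (bits 5..31) and the second is extract_bits(0xFFFFFFFF, 0, 8) =
			// 0x000000FF (bits 32..39), i.e. all invocations >= 5 are set.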
			case BuiltInSubgroupGtMask:
				if (msl_options.is_ios())
					SPIRV_CROSS_THROW("Subgroup ballot functionality is unavailable on iOS.");
				if (!msl_options.supports_msl_version(2, 1))
					SPIRV_CROSS_THROW("Subgroup ballot functionality requires Metal 2.1.");
				entry_func.fixup_hooks_in.push_back([=]() {
					// The same logic applies here, except now the index is one
					// more than the subgroup invocation ID.
					statement(builtin_type_decl(bi_type), " ", to_expression(var_id),
					          " = uint4(extract_bits(0xFFFFFFFF, min(",
					          to_expression(builtin_subgroup_invocation_id_id), " + 1, 32u), (uint)max(min((int)",
					          to_expression(builtin_subgroup_size_id), ", 32) - (int)",
					          to_expression(builtin_subgroup_invocation_id_id),
					          " - 1, 0)), extract_bits(0xFFFFFFFF, (uint)max((int)",
					          to_expression(builtin_subgroup_invocation_id_id), " + 1 - 32, 0), (uint)max((int)",
					          to_expression(builtin_subgroup_size_id), " - (int)max(",
					          to_expression(builtin_subgroup_invocation_id_id), " + 1, 32u), 0)), uint2(0));");
				});
				break;
			case BuiltInSubgroupLeMask:
				if (msl_options.is_ios())
					SPIRV_CROSS_THROW("Subgroup ballot functionality is unavailable on iOS.");
				if (!msl_options.supports_msl_version(2, 1))
					SPIRV_CROSS_THROW("Subgroup ballot functionality requires Metal 2.1.");
				entry_func.fixup_hooks_in.push_back([=]() {
					statement(builtin_type_decl(bi_type), " ", to_expression(var_id),
					          " = uint4(extract_bits(0xFFFFFFFF, 0, min(",
					          to_expression(builtin_subgroup_invocation_id_id),
					          " + 1, 32u)), extract_bits(0xFFFFFFFF, 0, (uint)max((int)",
					          to_expression(builtin_subgroup_invocation_id_id), " + 1 - 32, 0)), uint2(0));");
				});
				break;
			case BuiltInSubgroupLtMask:
				if (msl_options.is_ios())
					SPIRV_CROSS_THROW("Subgroup ballot functionality is unavailable on iOS.");
				if (!msl_options.supports_msl_version(2, 1))
					SPIRV_CROSS_THROW("Subgroup ballot functionality requires Metal 2.1.");
				entry_func.fixup_hooks_in.push_back([=]() {
					statement(builtin_type_decl(bi_type), " ", to_expression(var_id),
					          " = uint4(extract_bits(0xFFFFFFFF, 0, min(",
					          to_expression(builtin_subgroup_invocation_id_id),
					          ", 32u)), extract_bits(0xFFFFFFFF, 0, (uint)max((int)",
					          to_expression(builtin_subgroup_invocation_id_id), " - 32, 0)), uint2(0));");
				});
				break;
			default:
				break;
			}
		}
	});
}

// Returns the Metal index of the resource of the specified type as used by the specified variable.
uint32_t CompilerMSL::get_metal_resource_index(SPIRVariable &var, SPIRType::BaseType basetype)
{
	auto &execution = get_entry_point();
	auto &var_dec = ir.meta[var.self].decoration;
	uint32_t var_desc_set = (var.storage == StorageClassPushConstant) ? kPushConstDescSet : var_dec.set;
	uint32_t var_binding = (var.storage == StorageClassPushConstant) ? kPushConstBinding : var_dec.binding;

	// If a matching binding has been specified, find and use it.
	auto itr = resource_bindings.find({ execution.model, var_desc_set, var_binding });

	if (itr != end(resource_bindings))
	{
		auto &remap = itr->second;
		remap.second = true;
		switch (basetype)
		{
		case SPIRType::Image:
			return remap.first.msl_texture;
		case SPIRType::Sampler:
			return remap.first.msl_sampler;
		default:
			return remap.first.msl_buffer;
		}
	}

	// If there is no explicit mapping of bindings to MSL, use the declared binding.
	if (has_decoration(var.self, DecorationBinding))
	{
		var_binding = get_decoration(var.self, DecorationBinding);
		// Avoid emitting sentinel bindings.
		if (var_binding < 0x80000000u)
			return var_binding;
	}

	uint32_t binding_stride = 1;
	auto &type = get<SPIRType>(var.basetype);
	for (uint32_t i = 0; i < uint32_t(type.array.size()); i++)
		binding_stride *= type.array_size_literal[i] ? type.array[i] : get<SPIRConstant>(type.array[i]).scalar();

	// If a binding has not been specified, revert to incrementing resource indices.
	uint32_t resource_index;
	switch (basetype)
	{
	case SPIRType::Image:
		resource_index = next_metal_resource_index_texture;
		next_metal_resource_index_texture += binding_stride;
		break;
	case SPIRType::Sampler:
		resource_index = next_metal_resource_index_sampler;
		next_metal_resource_index_sampler += binding_stride;
		break;
	default:
		resource_index = next_metal_resource_index_buffer;
		next_metal_resource_index_buffer += binding_stride;
		break;
	}
	return resource_index;
}
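// For illustration: in the auto-increment path above, an array of 4 sampled
// images consumes 4 consecutive texture indices via binding_stride. If
// next_metal_resource_index_texture is N when the array is assigned, the array
// occupies indices N..N+3 and the counter advances to N+4.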

string CompilerMSL::argument_decl(const SPIRFunction::Parameter &arg)
{
	auto &var = get<SPIRVariable>(arg.id);
	auto &type = get_variable_data_type(var);
	auto &var_type = get<SPIRType>(arg.type);
	StorageClass storage = var_type.storage;
	bool is_pointer = var_type.pointer;

	// If we need to modify the name of the variable, make sure we use the original variable.
	// Our alias is just a shadow variable.
	uint32_t name_id = var.self;
	if (arg.alias_global_variable && var.basevariable)
		name_id = var.basevariable;

	bool constref = !arg.alias_global_variable && is_pointer && arg.write_count == 0;

	bool type_is_image = type.basetype == SPIRType::Image || type.basetype == SPIRType::SampledImage ||
	                     type.basetype == SPIRType::Sampler;

	// Arrays of images/samplers in MSL are always const.
	if (!type.array.empty() && type_is_image)
		constref = true;

	string decl;
	if (constref)
		decl += "const ";

	bool builtin = is_builtin_variable(var);
	if (var.basevariable == stage_in_ptr_var_id || var.basevariable == stage_out_ptr_var_id)
		decl += type_to_glsl(type, arg.id);
	else if (builtin)
		decl += builtin_type_decl(static_cast<BuiltIn>(get_decoration(arg.id, DecorationBuiltIn)), arg.id);
	else if ((storage == StorageClassUniform || storage == StorageClassStorageBuffer) && is_array(type))
		decl += join(type_to_glsl(type, arg.id), "*");
	else
		decl += type_to_glsl(type, arg.id);

	bool opaque_handle = storage == StorageClassUniformConstant;

	string address_space = get_argument_address_space(var);

	if (!builtin && !opaque_handle && !is_pointer &&
	    (storage == StorageClassFunction || storage == StorageClassGeneric))
	{
		// If the argument is a pure value and not an opaque type, we will pass by value.
		if (is_array(type))
		{
			// We are receiving an array by value. This is problematic.
			// We cannot be sure of the target address space since we are supposed to receive a copy,
			// but this is not possible with MSL without some extra work.
			// We will have to assume we're getting a reference in thread address space.
			// If we happen to get a reference in constant address space, the caller must emit a copy and pass that.
			// Thread const therefore becomes the only logical choice, since we cannot "create" a constant array from
			// non-constant arrays, but we can create thread const from constant.
			decl = string("thread const ") + decl;
			decl += " (&";
			decl += to_expression(name_id);
			decl += ")";
			decl += type_to_array_glsl(type);
		}
		else
		{
			if (!address_space.empty())
				decl = join(address_space, " ", decl);
			decl += " ";
			decl += to_expression(name_id);
		}
	}
	else if (is_array(type) && !type_is_image)
	{
		// Arrays of images and samplers are special cased.
		if (!address_space.empty())
			decl = join(address_space, " ", decl);

		if (msl_options.argument_buffers)
		{
			uint32_t desc_set = get_decoration(name_id, DecorationDescriptorSet);
			if ((storage == StorageClassUniform || storage == StorageClassStorageBuffer) &&
			    descriptor_set_is_argument_buffer(desc_set))
			{
				// An awkward case where we need to emit *more* address space declarations (yay!).
				// An example is where we pass down an array of buffer pointers to leaf functions.
				// It's a constant array containing pointers to constants.
				// The pointer array is always constant however. E.g.
				// device SSBO * constant (&array)[N].
				// const device SSBO * constant (&array)[N].
				// constant SSBO * constant (&array)[N].
				// However, this only matters for argument buffers, since for MSL 1.0 style codegen,
				// we emit the buffer array on stack instead, and that seems to work just fine apparently.
				decl += " constant";
			}
		}

		decl += " (&";
		decl += to_expression(name_id);
		decl += ")";
		decl += type_to_array_glsl(type);
	}
	else if (!opaque_handle)
	{
		// If this is going to be a reference to a variable pointer, the address space
		// for the reference has to go before the '&', but after the '*'.
		if (!address_space.empty())
		{
			if (decl.back() == '*')
				decl += join(" ", address_space, " ");
			else
				decl = join(address_space, " ", decl);
		}
		decl += "&";
		decl += " ";
		decl += to_expression(name_id);
	}
	else
	{
		if (!address_space.empty())
			decl = join(address_space, " ", decl);
		decl += " ";
		decl += to_expression(name_id);
	}

	return decl;
}
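
// For illustration only (parameter names invented): the declarations built above
// take forms such as
//     thread const float (&coords)[4]       // array received by value
//     constant UBO& ubo                     // buffer reference with address space
//     device SSBO* constant (&ssbos)[2]     // buffer pointer array in an argument buffer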

// If we're currently in the entry point function, and the object
// has a qualified name, use it; otherwise use the standard name.
string CompilerMSL::to_name(uint32_t id, bool allow_alias) const
{
	if (current_function && (current_function->self == ir.default_entry_point))
	{
		auto *m = ir.find_meta(id);
		if (m && !m->decoration.qualified_alias.empty())
			return m->decoration.qualified_alias;
	}
	return Compiler::to_name(id, allow_alias);
}
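
// Example (illustrative, names invented): inside the entry function an interface
// variable may carry a qualified alias such as "out.gl_Position" or "in.vtx_color",
// and that alias is returned here instead of the plain name.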

// Returns a name that combines the name of the struct with the name of the member, except for Builtins
string CompilerMSL::to_qualified_member_name(const SPIRType &type, uint32_t index)
{
	// Don't qualify Builtin names because they are unique and are treated as such when building expressions
	BuiltIn builtin = BuiltInMax;
	if (is_member_builtin(type, index, &builtin))
		return builtin_to_glsl(builtin, type.storage);

	// Strip any underscore prefix from member name
	string mbr_name = to_member_name(type, index);
	size_t startPos = mbr_name.find_first_not_of("_");
	mbr_name = (startPos != string::npos) ? mbr_name.substr(startPos) : "";
	return join(to_name(type.self), "_", mbr_name);
}
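
// Example (illustrative): for a struct named "Output" with member "_m3", the
// underscore prefix is stripped and "Output_m3" is returned; builtin members
// return their builtin_to_glsl() name instead.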

// Ensures that the specified name is permanently usable by prepending a prefix
// if the first chars are _ and a digit, which indicate a transient name.
string CompilerMSL::ensure_valid_name(string name, string pfx)
{
	return (name.size() >= 2 && name[0] == '_' && isdigit(name[1])) ? (pfx + name) : name;
}
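
// Example (illustrative): ensure_valid_name("_53", "m") returns "m_53", while a
// stable name like "position" is returned unchanged.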

// Replace all names that match MSL keywords or Metal Standard Library functions.
void CompilerMSL::replace_illegal_names()
{
	// FIXME: MSL and GLSL are doing two different things here.
	// Agree on convention and remove this override.
	static const unordered_set<string> keywords = {
		"kernel", "vertex", "fragment", "compute", "bias", "assert",
		"VARIABLE_TRACEPOINT", "STATIC_DATA_TRACEPOINT", "STATIC_DATA_TRACEPOINT_V",
		"METAL_ALIGN", "METAL_ASM", "METAL_CONST", "METAL_DEPRECATED", "METAL_ENABLE_IF",
		"METAL_FUNC", "METAL_INTERNAL", "METAL_NON_NULL_RETURN", "METAL_NORETURN",
		"METAL_NOTHROW", "METAL_PURE", "METAL_UNAVAILABLE", "METAL_IMPLICIT", "METAL_EXPLICIT",
		"METAL_CONST_ARG", "METAL_ARG_UNIFORM", "METAL_ZERO_ARG", "METAL_VALID_LOD_ARG",
		"METAL_VALID_LEVEL_ARG", "METAL_VALID_STORE_ORDER", "METAL_VALID_LOAD_ORDER",
		"METAL_VALID_COMPARE_EXCHANGE_FAILURE_ORDER", "METAL_COMPATIBLE_COMPARE_EXCHANGE_ORDERS",
		"METAL_VALID_RENDER_TARGET", "is_function_constant_defined",
		"CHAR_BIT", "SCHAR_MAX", "SCHAR_MIN", "UCHAR_MAX", "CHAR_MAX", "CHAR_MIN",
		"USHRT_MAX", "SHRT_MAX", "SHRT_MIN", "UINT_MAX", "INT_MAX", "INT_MIN",
		"FLT_DIG", "FLT_MANT_DIG", "FLT_MAX_10_EXP", "FLT_MAX_EXP", "FLT_MIN_10_EXP",
		"FLT_MIN_EXP", "FLT_RADIX", "FLT_MAX", "FLT_MIN", "FLT_EPSILON",
		"FP_ILOGB0", "FP_ILOGBNAN", "MAXFLOAT", "HUGE_VALF", "INFINITY", "NAN",
		"M_E_F", "M_LOG2E_F", "M_LOG10E_F", "M_LN2_F", "M_LN10_F", "M_PI_F", "M_PI_2_F",
		"M_PI_4_F", "M_1_PI_F", "M_2_PI_F", "M_2_SQRTPI_F", "M_SQRT2_F", "M_SQRT1_2_F",
		"HALF_DIG", "HALF_MANT_DIG", "HALF_MAX_10_EXP", "HALF_MAX_EXP", "HALF_MIN_10_EXP",
		"HALF_MIN_EXP", "HALF_RADIX", "HALF_MAX", "HALF_MIN", "HALF_EPSILON", "MAXHALF",
		"HUGE_VALH", "M_E_H", "M_LOG2E_H", "M_LOG10E_H", "M_LN2_H", "M_LN10_H", "M_PI_H",
		"M_PI_2_H", "M_PI_4_H", "M_1_PI_H", "M_2_PI_H", "M_2_SQRTPI_H", "M_SQRT2_H", "M_SQRT1_2_H",
		"DBL_DIG", "DBL_MANT_DIG", "DBL_MAX_10_EXP", "DBL_MAX_EXP", "DBL_MIN_10_EXP",
		"DBL_MIN_EXP", "DBL_RADIX", "DBL_MAX", "DBL_MIN", "DBL_EPSILON", "HUGE_VAL",
		"M_E", "M_LOG2E", "M_LOG10E", "M_LN2", "M_LN10", "M_PI", "M_PI_2", "M_PI_4",
		"M_1_PI", "M_2_PI", "M_2_SQRTPI", "M_SQRT2", "M_SQRT1_2",
		"quad_broadcast",
	};

	static const unordered_set<string> illegal_func_names = {
		"main", "saturate", "assert",
		"VARIABLE_TRACEPOINT", "STATIC_DATA_TRACEPOINT", "STATIC_DATA_TRACEPOINT_V",
		"METAL_ALIGN", "METAL_ASM", "METAL_CONST", "METAL_DEPRECATED", "METAL_ENABLE_IF",
		"METAL_FUNC", "METAL_INTERNAL", "METAL_NON_NULL_RETURN", "METAL_NORETURN",
		"METAL_NOTHROW", "METAL_PURE", "METAL_UNAVAILABLE", "METAL_IMPLICIT", "METAL_EXPLICIT",
		"METAL_CONST_ARG", "METAL_ARG_UNIFORM", "METAL_ZERO_ARG", "METAL_VALID_LOD_ARG",
		"METAL_VALID_LEVEL_ARG", "METAL_VALID_STORE_ORDER", "METAL_VALID_LOAD_ORDER",
		"METAL_VALID_COMPARE_EXCHANGE_FAILURE_ORDER", "METAL_COMPATIBLE_COMPARE_EXCHANGE_ORDERS",
		"METAL_VALID_RENDER_TARGET", "is_function_constant_defined",
		"CHAR_BIT", "SCHAR_MAX", "SCHAR_MIN", "UCHAR_MAX", "CHAR_MAX", "CHAR_MIN",
		"USHRT_MAX", "SHRT_MAX", "SHRT_MIN", "UINT_MAX", "INT_MAX", "INT_MIN",
		"FLT_DIG", "FLT_MANT_DIG", "FLT_MAX_10_EXP", "FLT_MAX_EXP", "FLT_MIN_10_EXP",
		"FLT_MIN_EXP", "FLT_RADIX", "FLT_MAX", "FLT_MIN", "FLT_EPSILON",
		"FP_ILOGB0", "FP_ILOGBNAN", "MAXFLOAT", "HUGE_VALF", "INFINITY", "NAN",
		"M_E_F", "M_LOG2E_F", "M_LOG10E_F", "M_LN2_F", "M_LN10_F", "M_PI_F", "M_PI_2_F",
		"M_PI_4_F", "M_1_PI_F", "M_2_PI_F", "M_2_SQRTPI_F", "M_SQRT2_F", "M_SQRT1_2_F",
		"HALF_DIG", "HALF_MANT_DIG", "HALF_MAX_10_EXP", "HALF_MAX_EXP", "HALF_MIN_10_EXP",
		"HALF_MIN_EXP", "HALF_RADIX", "HALF_MAX", "HALF_MIN", "HALF_EPSILON", "MAXHALF",
		"HUGE_VALH", "M_E_H", "M_LOG2E_H", "M_LOG10E_H", "M_LN2_H", "M_LN10_H", "M_PI_H",
		"M_PI_2_H", "M_PI_4_H", "M_1_PI_H", "M_2_PI_H", "M_2_SQRTPI_H", "M_SQRT2_H", "M_SQRT1_2_H",
		"DBL_DIG", "DBL_MANT_DIG", "DBL_MAX_10_EXP", "DBL_MAX_EXP", "DBL_MIN_10_EXP",
		"DBL_MIN_EXP", "DBL_RADIX", "DBL_MAX", "DBL_MIN", "DBL_EPSILON", "HUGE_VAL",
		"M_E", "M_LOG2E", "M_LOG10E", "M_LN2", "M_LN10", "M_PI", "M_PI_2", "M_PI_4",
		"M_1_PI", "M_2_PI", "M_2_SQRTPI", "M_SQRT2", "M_SQRT1_2",
	};

	ir.for_each_typed_id<SPIRVariable>([&](uint32_t self, SPIRVariable &) {
		auto &dec = ir.meta[self].decoration;
		if (keywords.find(dec.alias) != end(keywords))
			dec.alias += "0";
	});

	ir.for_each_typed_id<SPIRFunction>([&](uint32_t self, SPIRFunction &) {
		auto &dec = ir.meta[self].decoration;
		if (illegal_func_names.find(dec.alias) != end(illegal_func_names))
			dec.alias += "0";
	});

	ir.for_each_typed_id<SPIRType>([&](uint32_t self, SPIRType &) {
		for (auto &mbr_dec : ir.meta[self].members)
			if (keywords.find(mbr_dec.alias) != end(keywords))
				mbr_dec.alias += "0";
	});

	for (auto &entry : ir.entry_points)
	{
		// Change both the entry point name and the alias, to keep them synced.
		string &ep_name = entry.second.name;
		if (illegal_func_names.find(ep_name) != end(illegal_func_names))
			ep_name += "0";

		// Always write this because entry point might have been renamed earlier.
		ir.meta[entry.first].decoration.alias = ep_name;
	}

	CompilerGLSL::replace_illegal_names();
}
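
// Example (illustrative): a shader function named "saturate" or an entry point
// named "main" is renamed to "saturate0"/"main0" above, since those identifiers
// collide with the MSL standard library or are otherwise reserved.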

string CompilerMSL::to_member_reference(uint32_t base, const SPIRType &type, uint32_t index, bool ptr_chain)
{
	auto *var = maybe_get<SPIRVariable>(base);
	// If this is a buffer array, we have to dereference the buffer pointers.
	// Otherwise, if this is a pointer expression, dereference it.

	bool declared_as_pointer = false;

	if (var)
	{
		bool is_buffer_variable = var->storage == StorageClassUniform || var->storage == StorageClassStorageBuffer;
		declared_as_pointer = is_buffer_variable && is_array(get<SPIRType>(var->basetype));
	}

	if (declared_as_pointer || (!ptr_chain && should_dereference(base)))
		return join("->", to_member_name(type, index));
	else
		return join(".", to_member_name(type, index));
}
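
// Example (illustrative): member "v" accessed through a buffer array element or a
// dereferenced pointer is emitted as "->v"; plain value/reference access yields ".v".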

string CompilerMSL::to_qualifiers_glsl(uint32_t id)
{
	string quals;

	auto &type = expression_type(id);
	if (type.storage == StorageClassWorkgroup)
		quals += "threadgroup ";

	return quals;
}
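
// Example (illustrative, variable name invented): a compute shader's Workgroup
// (GLSL "shared") variable is declared as e.g. "threadgroup float sdata[64];",
// the qualifier coming from here.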

// The optional id parameter indicates the object whose type we are trying
// to find the description for. Most type descriptions do not depend on a
// specific object's use of that type.
string CompilerMSL::type_to_glsl(const SPIRType &type, uint32_t id)
{
	string type_name;

	// Pointer?
	if (type.pointer)
	{
		type_name = join(get_type_address_space(type, id), " ", type_to_glsl(get<SPIRType>(type.parent_type), id));
		switch (type.basetype)
		{
		case SPIRType::Image:
		case SPIRType::SampledImage:
		case SPIRType::Sampler:
			// These are handles.
			break;
		default:
			// Anything else can be a raw pointer.
			type_name += "*";
			break;
		}
		return type_name;
	}

	switch (type.basetype)
	{
	case SPIRType::Struct:
		// Need OpName lookup here to get a "sensible" name for a struct.
		return to_name(type.self);

	case SPIRType::Image:
	case SPIRType::SampledImage:
		return image_type_glsl(type, id);

	case SPIRType::Sampler:
		return sampler_type(type);

	case SPIRType::Void:
		return "void";

	case SPIRType::AtomicCounter:
		return "atomic_uint";

	case SPIRType::ControlPointArray:
		return join("patch_control_point<", type_to_glsl(get<SPIRType>(type.parent_type), id), ">");

	// Scalars
	case SPIRType::Boolean:
		type_name = "bool";
		break;
	case SPIRType::Char:
	case SPIRType::SByte:
		type_name = "char";
		break;
	case SPIRType::UByte:
		type_name = "uchar";
		break;
	case SPIRType::Short:
		type_name = "short";
		break;
	case SPIRType::UShort:
		type_name = "ushort";
		break;
	case SPIRType::Int:
		type_name = "int";
		break;
	case SPIRType::UInt:
		type_name = "uint";
		break;
	case SPIRType::Int64:
		if (!msl_options.supports_msl_version(2, 2))
			SPIRV_CROSS_THROW("64-bit integers are only supported in MSL 2.2 and above.");
		type_name = "long";
		break;
	case SPIRType::UInt64:
		if (!msl_options.supports_msl_version(2, 2))
			SPIRV_CROSS_THROW("64-bit integers are only supported in MSL 2.2 and above.");
		type_name = "ulong";
		break;
	case SPIRType::Half:
		type_name = "half";
		break;
	case SPIRType::Float:
		type_name = "float";
		break;
	case SPIRType::Double:
		type_name = "double"; // Currently unsupported
		break;

	default:
		return "unknown_type";
	}

	// Matrix?
	if (type.columns > 1)
		type_name += to_string(type.columns) + "x";

	// Vector or Matrix?
	if (type.vecsize > 1)
		type_name += to_string(type.vecsize);

	return type_name;
}
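
// Example (illustrative): a float type with columns == 4 and vecsize == 4 becomes
// "float4x4" (column count, then "x", then vecsize), a 3-component half vector
// becomes "half3", and a plain scalar int stays "int".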

std::string CompilerMSL::sampler_type(const SPIRType &type)
{
	if (!type.array.empty())
	{
		if (!msl_options.supports_msl_version(2))
			SPIRV_CROSS_THROW("MSL 2.0 or greater is required for arrays of samplers.");

		if (type.array.size() > 1)
			SPIRV_CROSS_THROW("Arrays of arrays of samplers are not supported in MSL.");

		// Arrays of samplers in MSL must be declared with a special array<T, N> syntax ala C++11 std::array.
		uint32_t array_size = to_array_size_literal(type);
		if (array_size == 0)
			SPIRV_CROSS_THROW("Unsized array of samplers is not supported in MSL.");

		auto &parent = get<SPIRType>(get_pointee_type(type).parent_type);
		return join("array<", sampler_type(parent), ", ", array_size, ">");
	}
	else
		return "sampler";
}
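
// Example (illustrative): a sampler array of literal size 4 is emitted as
// "array<sampler, 4>", the std::array-like syntax MSL 2.0 requires.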

// Returns an MSL string describing the SPIR-V image type
string CompilerMSL::image_type_glsl(const SPIRType &type, uint32_t id)
{
	auto *var = maybe_get<SPIRVariable>(id);
	if (var && var->basevariable)
	{
		// For comparison images, check against the base variable,
		// and not the fake ID which might have been generated for this variable.
		id = var->basevariable;
	}

	if (!type.array.empty())
	{
		uint32_t major = 2, minor = 0;
		if (msl_options.is_ios())
		{
			major = 1;
			minor = 2;
		}
		if (!msl_options.supports_msl_version(major, minor))
		{
			if (msl_options.is_ios())
				SPIRV_CROSS_THROW("MSL 1.2 or greater is required for arrays of textures.");
			else
				SPIRV_CROSS_THROW("MSL 2.0 or greater is required for arrays of textures.");
		}

		if (type.array.size() > 1)
			SPIRV_CROSS_THROW("Arrays of arrays of textures are not supported in MSL.");

		// Arrays of images in MSL must be declared with a special array<T, N> syntax ala C++11 std::array.
		uint32_t array_size = to_array_size_literal(type);
		if (array_size == 0)
			SPIRV_CROSS_THROW("Unsized array of images is not supported in MSL.");

		auto &parent = get<SPIRType>(get_pointee_type(type).parent_type);
		return join("array<", image_type_glsl(parent, id), ", ", array_size, ">");
	}

	string img_type_name;

	// Bypass pointers because we need the real image struct
	auto &img_type = get<SPIRType>(type.self).image;
	if (image_is_comparison(type, id))
	{
		switch (img_type.dim)
		{
		case Dim1D:
			img_type_name += "depth1d_unsupported_by_metal";
			break;
		case Dim2D:
			if (img_type.ms && img_type.arrayed)
			{
				if (!msl_options.supports_msl_version(2, 1))
					SPIRV_CROSS_THROW("Multisampled array textures are supported from 2.1.");
				img_type_name += "depth2d_ms_array";
			}
			else if (img_type.ms)
				img_type_name += "depth2d_ms";
			else if (img_type.arrayed)
				img_type_name += "depth2d_array";
			else
				img_type_name += "depth2d";
			break;
		case Dim3D:
			img_type_name += "depth3d_unsupported_by_metal";
			break;
		case DimCube:
			img_type_name += (img_type.arrayed ? "depthcube_array" : "depthcube");
			break;
		default:
			img_type_name += "unknown_depth_texture_type";
			break;
		}
	}
	else
	{
		switch (img_type.dim)
		{
		case Dim1D:
			img_type_name += (img_type.arrayed ? "texture1d_array" : "texture1d");
			break;
		case DimBuffer:
			if (img_type.ms || img_type.arrayed)
				SPIRV_CROSS_THROW("Cannot use texel buffers with multisampling or array layers.");

			if (msl_options.texture_buffer_native)
			{
				if (!msl_options.supports_msl_version(2, 1))
					SPIRV_CROSS_THROW("Native texture_buffer type is only supported in MSL 2.1.");
				img_type_name = "texture_buffer";
			}
			else
				img_type_name += "texture2d";
			break;
		case Dim2D:
		case DimSubpassData:
			if (img_type.ms && img_type.arrayed)
			{
				if (!msl_options.supports_msl_version(2, 1))
					SPIRV_CROSS_THROW("Multisampled array textures are supported from 2.1.");
				img_type_name += "texture2d_ms_array";
			}
			else if (img_type.ms)
				img_type_name += "texture2d_ms";
			else if (img_type.arrayed)
				img_type_name += "texture2d_array";
			else
				img_type_name += "texture2d";
			break;
		case Dim3D:
			img_type_name += "texture3d";
			break;
		case DimCube:
			img_type_name += (img_type.arrayed ? "texturecube_array" : "texturecube");
			break;
		default:
			img_type_name += "unknown_texture_type";
			break;
		}
	}

	// Append the pixel type
	img_type_name += "<";
	img_type_name += type_to_glsl(get<SPIRType>(img_type.type));

	// For unsampled images, append the sample/read/write access qualifier.
	// For kernel images, the access qualifier may be supplied directly by SPIR-V.
	// Otherwise it may be set based on whether the image is read from or written to within the shader.
	if (type.basetype == SPIRType::Image && type.image.sampled == 2 && type.image.dim != DimSubpassData)
	{
		switch (img_type.access)
		{
		case AccessQualifierReadOnly:
			img_type_name += ", access::read";
			break;

		case AccessQualifierWriteOnly:
			img_type_name += ", access::write";
			break;

		case AccessQualifierReadWrite:
			img_type_name += ", access::read_write";
			break;

		default:
		{
			auto *p_var = maybe_get_backing_variable(id);
			if (p_var && p_var->basevariable)
				p_var = maybe_get<SPIRVariable>(p_var->basevariable);
			if (p_var && !has_decoration(p_var->self, DecorationNonWritable))
			{
				img_type_name += ", access::";

				if (!has_decoration(p_var->self, DecorationNonReadable))
					img_type_name += "read_";

				img_type_name += "write";
			}
			break;
		}
		}
	}

	img_type_name += ">";

	return img_type_name;
}
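
// Example (illustrative): a sampled 2D float image becomes "texture2d<float>", an
// arrayed cube depth image becomes "depthcube_array<float>", and a storage image
// lacking NonReadable/NonWritable decorations becomes
// "texture2d<float, access::read_write>".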
|
|
|
|
|
MSL: Add support for subgroup operations.
Some support for subgroups is present starting in Metal 2.0 on both iOS
and macOS. macOS gains more complete support in 10.14 (Metal 2.1).
Some restrictions are present. On iOS and on macOS 10.13, the
implementation of `OpGroupNonUniformElect` is incorrect: if thread 0 has
already terminated or is not executing a conditional branch, the first
thread that *is* will falsely believe itself not to be. Unfortunately,
this operation is part of the "basic" feature set; without it, subgroups
cannot be supported at all.
The `SubgroupSize` and `SubgroupLocalInvocationId` builtins are only
available in compute shaders (and, by extension, tessellation control
shaders), despite SPIR-V making them available in all stages. This
limits the usefulness of some of the subgroup operations in fragment
shaders.
Although Metal on macOS supports some clustered, inclusive, and
exclusive operations, it does not support them all. In particular,
inclusive and exclusive min, max, and, or, and xor; as well as cluster
sizes other than 4 are not supported. If this becomes a problem, they
could be emulated, but at a significant performance cost due to the need
for non-uniform operations.
2019-05-15 21:03:30 +00:00
|
|
|
void CompilerMSL::emit_subgroup_op(const Instruction &i)
|
|
|
|
{
|
|
|
|
const uint32_t *ops = stream(i);
|
|
|
|
auto op = static_cast<Op>(i.op);
|
|
|
|
|
|
|
|
// Metal 2.0 is required. iOS only supports quad ops. macOS only supports
|
|
|
|
// broadcast and shuffle on 10.13 (2.0), with full support in 10.14 (2.1).
|
|
|
|
// Note that iOS makes no distinction between a quad-group and a subgroup;
|
|
|
|
// all subgroups are quad-groups there.
|
|
|
|
if (!msl_options.supports_msl_version(2))
|
|
|
|
SPIRV_CROSS_THROW("Subgroups are only supported in Metal 2.0 and up.");
|
|
|
|
|
|
|
|
if (msl_options.is_ios())
|
|
|
|
{
|
|
|
|
switch (op)
|
|
|
|
{
|
|
|
|
default:
|
|
|
|
SPIRV_CROSS_THROW("iOS only supports quad-group operations.");
|
|
|
|
case OpGroupNonUniformBroadcast:
|
|
|
|
case OpGroupNonUniformShuffle:
|
|
|
|
case OpGroupNonUniformShuffleXor:
|
|
|
|
case OpGroupNonUniformShuffleUp:
|
|
|
|
case OpGroupNonUniformShuffleDown:
|
|
|
|
case OpGroupNonUniformQuadSwap:
|
|
|
|
case OpGroupNonUniformQuadBroadcast:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (msl_options.is_macos() && !msl_options.supports_msl_version(2, 1))
|
|
|
|
{
|
|
|
|
switch (op)
|
|
|
|
{
|
|
|
|
default:
|
|
|
|
SPIRV_CROSS_THROW("Subgroup ops beyond broadcast and shuffle on macOS require Metal 2.0 and up.");
|
|
|
|
case OpGroupNonUniformBroadcast:
|
|
|
|
case OpGroupNonUniformShuffle:
|
|
|
|
case OpGroupNonUniformShuffleXor:
|
|
|
|
case OpGroupNonUniformShuffleUp:
|
|
|
|
case OpGroupNonUniformShuffleDown:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
uint32_t result_type = ops[0];
|
|
|
|
uint32_t id = ops[1];
|
|
|
|
|
|
|
|
auto scope = static_cast<Scope>(get<SPIRConstant>(ops[2]).scalar());
|
|
|
|
if (scope != ScopeSubgroup)
|
|
|
|
SPIRV_CROSS_THROW("Only subgroup scope is supported.");
|
|
|
|
|
|
|
|
switch (op)
|
|
|
|
{
|
|
|
|
case OpGroupNonUniformElect:
|
2019-05-16 18:42:09 +00:00
|
|
|
emit_op(result_type, id, "simd_is_first()", true);
|
MSL: Add support for subgroup operations.
Some support for subgroups is present starting in Metal 2.0 on both iOS
and macOS. macOS gains more complete support in 10.14 (Metal 2.1).
Some restrictions are present. On iOS and on macOS 10.13, the
implementation of `OpGroupNonUniformElect` is incorrect: if thread 0 has
already terminated or is not executing a conditional branch, the first
thread that *is* will falsely believe itself not to be. Unfortunately,
this operation is part of the "basic" feature set; without it, subgroups
cannot be supported at all.
The `SubgroupSize` and `SubgroupLocalInvocationId` builtins are only
available in compute shaders (and, by extension, tessellation control
shaders), despite SPIR-V making them available in all stages. This
limits the usefulness of some of the subgroup operations in fragment
shaders.
Although Metal on macOS supports some clustered, inclusive, and
exclusive operations, it does not support them all. In particular,
inclusive and exclusive min, max, and, or, and xor; as well as cluster
sizes other than 4 are not supported. If this becomes a problem, they
could be emulated, but at a significant performance cost due to the need
for non-uniform operations.
2019-05-15 21:03:30 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case OpGroupNonUniformBroadcast:
|
|
|
|
emit_binary_func_op(result_type, id, ops[3], ops[4],
|
|
|
|
msl_options.is_ios() ? "quad_broadcast" : "simd_broadcast");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case OpGroupNonUniformBroadcastFirst:
|
|
|
|
emit_unary_func_op(result_type, id, ops[3], "simd_broadcast_first");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case OpGroupNonUniformBallot:
|
|
|
|
emit_unary_func_op(result_type, id, ops[3], "spvSubgroupBallot");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case OpGroupNonUniformInverseBallot:
|
|
|
|
emit_binary_func_op(result_type, id, ops[3], builtin_subgroup_invocation_id_id, "spvSubgroupBallotBitExtract");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case OpGroupNonUniformBallotBitExtract:
|
|
|
|
emit_binary_func_op(result_type, id, ops[3], ops[4], "spvSubgroupBallotBitExtract");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case OpGroupNonUniformBallotFindLSB:
|
|
|
|
emit_unary_func_op(result_type, id, ops[3], "spvSubgroupBallotFindLSB");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case OpGroupNonUniformBallotFindMSB:
|
|
|
|
emit_unary_func_op(result_type, id, ops[3], "spvSubgroupBallotFindMSB");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case OpGroupNonUniformBallotBitCount:
|
|
|
|
{
|
|
|
|
auto operation = static_cast<GroupOperation>(ops[3]);
|
|
|
|
if (operation == GroupOperationReduce)
|
|
|
|
emit_unary_func_op(result_type, id, ops[4], "spvSubgroupBallotBitCount");
|
|
|
|
else if (operation == GroupOperationInclusiveScan)
|
|
|
|
emit_binary_func_op(result_type, id, ops[4], builtin_subgroup_invocation_id_id,
|
|
|
|
"spvSubgroupBallotInclusiveBitCount");
|
|
|
|
else if (operation == GroupOperationExclusiveScan)
|
|
|
|
emit_binary_func_op(result_type, id, ops[4], builtin_subgroup_invocation_id_id,
|
|
|
|
"spvSubgroupBallotExclusiveBitCount");
|
|
|
|
else
|
|
|
|
SPIRV_CROSS_THROW("Invalid BitCount operation.");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case OpGroupNonUniformShuffle:
|
|
|
|
emit_binary_func_op(result_type, id, ops[3], ops[4], msl_options.is_ios() ? "quad_shuffle" : "simd_shuffle");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case OpGroupNonUniformShuffleXor:
|
|
|
|
emit_binary_func_op(result_type, id, ops[3], ops[4],
|
|
|
|
msl_options.is_ios() ? "quad_shuffle_xor" : "simd_shuffle_xor");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case OpGroupNonUniformShuffleUp:
|
|
|
|
emit_binary_func_op(result_type, id, ops[3], ops[4],
|
|
|
|
msl_options.is_ios() ? "quad_shuffle_up" : "simd_shuffle_up");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case OpGroupNonUniformShuffleDown:
|
|
|
|
emit_binary_func_op(result_type, id, ops[3], ops[4],
|
|
|
|
msl_options.is_ios() ? "quad_shuffle_down" : "simd_shuffle_down");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case OpGroupNonUniformAll:
|
|
|
|
emit_unary_func_op(result_type, id, ops[3], "simd_all");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case OpGroupNonUniformAny:
|
|
|
|
emit_unary_func_op(result_type, id, ops[3], "simd_any");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case OpGroupNonUniformAllEqual:
|
|
|
|
emit_unary_func_op(result_type, id, ops[3], "spvSubgroupAllEqual");
|
|
|
|
break;
|
|
|
|
|
|
|
|
// clang-format off
|
|
|
|
#define MSL_GROUP_OP(op, msl_op) \
|
|
|
|
case OpGroupNonUniform##op: \
|
|
|
|
{ \
|
|
|
|
auto operation = static_cast<GroupOperation>(ops[3]); \
|
|
|
|
if (operation == GroupOperationReduce) \
|
|
|
|
emit_unary_func_op(result_type, id, ops[4], "simd_" #msl_op); \
|
|
|
|
else if (operation == GroupOperationInclusiveScan) \
|
|
|
|
emit_unary_func_op(result_type, id, ops[4], "simd_prefix_inclusive_" #msl_op); \
|
|
|
|
else if (operation == GroupOperationExclusiveScan) \
|
|
|
|
emit_unary_func_op(result_type, id, ops[4], "simd_prefix_exclusive_" #msl_op); \
|
|
|
|
else if (operation == GroupOperationClusteredReduce) \
|
|
|
|
{ \
|
|
|
|
/* Only cluster sizes of 4 are supported. */ \
|
|
|
|
uint32_t cluster_size = get<SPIRConstant>(ops[5]).scalar(); \
|
|
|
|
if (cluster_size != 4) \
|
|
|
|
SPIRV_CROSS_THROW("Metal only supports quad ClusteredReduce."); \
|
|
|
|
emit_unary_func_op(result_type, id, ops[4], "quad_" #msl_op); \
|
|
|
|
} \
|
|
|
|
else \
|
|
|
|
SPIRV_CROSS_THROW("Invalid group operation."); \
|
|
|
|
break; \
|
|
|
|
}
|
|
|
|
MSL_GROUP_OP(FAdd, sum)
|
|
|
|
MSL_GROUP_OP(FMul, product)
|
|
|
|
MSL_GROUP_OP(IAdd, sum)
|
|
|
|
MSL_GROUP_OP(IMul, product)
|
|
|
|
#undef MSL_GROUP_OP
|
|
|
|
// The others, unfortunately, don't support InclusiveScan or ExclusiveScan.
|
|
|
|
#define MSL_GROUP_OP(op, msl_op) \
|
|
|
|
case OpGroupNonUniform##op: \
|
|
|
|
{ \
|
|
|
|
auto operation = static_cast<GroupOperation>(ops[3]); \
|
|
|
|
if (operation == GroupOperationReduce) \
|
|
|
|
emit_unary_func_op(result_type, id, ops[4], "simd_" #msl_op); \
|
|
|
|
else if (operation == GroupOperationInclusiveScan) \
|
|
|
|
SPIRV_CROSS_THROW("Metal doesn't support InclusiveScan for OpGroupNonUniform" #op "."); \
|
|
|
|
else if (operation == GroupOperationExclusiveScan) \
|
|
|
|
SPIRV_CROSS_THROW("Metal doesn't support ExclusiveScan for OpGroupNonUniform" #op "."); \
|
|
|
|
else if (operation == GroupOperationClusteredReduce) \
|
|
|
|
{ \
|
|
|
|
/* Only cluster sizes of 4 are supported. */ \
|
|
|
|
uint32_t cluster_size = get<SPIRConstant>(ops[5]).scalar(); \
|
|
|
|
if (cluster_size != 4) \
|
|
|
|
SPIRV_CROSS_THROW("Metal only supports quad ClusteredReduce."); \
|
|
|
|
emit_unary_func_op(result_type, id, ops[4], "quad_" #msl_op); \
|
|
|
|
} \
|
|
|
|
else \
|
|
|
|
SPIRV_CROSS_THROW("Invalid group operation."); \
|
|
|
|
break; \
|
|
|
|
}
|
|
|
|
MSL_GROUP_OP(FMin, min)
|
|
|
|
MSL_GROUP_OP(FMax, max)
|
|
|
|
MSL_GROUP_OP(SMin, min)
|
|
|
|
MSL_GROUP_OP(SMax, max)
|
|
|
|
MSL_GROUP_OP(UMin, min)
|
|
|
|
MSL_GROUP_OP(UMax, max)
|
|
|
|
MSL_GROUP_OP(BitwiseAnd, and)
|
|
|
|
MSL_GROUP_OP(BitwiseOr, or)
|
|
|
|
MSL_GROUP_OP(BitwiseXor, xor)
|
|
|
|
MSL_GROUP_OP(LogicalAnd, and)
|
|
|
|
MSL_GROUP_OP(LogicalOr, or)
|
|
|
|
MSL_GROUP_OP(LogicalXor, xor)
|
|
|
|
// clang-format on

	case OpGroupNonUniformQuadSwap:
	{
		// We can implement this easily based on the following table giving
		// the target lane ID from the direction and current lane ID:
		//        Direction
		//      | 0 | 1 | 2 |
		//   ---+---+---+---+
		// L  0 | 1   2   3
		// a  1 | 0   3   2
		// n  2 | 3   0   1
		// e  3 | 2   1   0
		// Notice that target = source ^ (direction + 1).
		uint32_t mask = get<SPIRConstant>(ops[4]).scalar() + 1;
		uint32_t mask_id = ir.increase_bound_by(1);
		set<SPIRConstant>(mask_id, expression_type_id(ops[4]), mask, false);
		emit_binary_func_op(result_type, id, ops[3], mask_id, "quad_shuffle_xor");
		break;
	}
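
	// For example, a quad swap with direction = 1 (vertical) is emitted as
	// quad_shuffle_xor(value, 2u), so lane 2 exchanges with lane 2 ^ 2 = 0,
	// matching the table above.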

	case OpGroupNonUniformQuadBroadcast:
		emit_binary_func_op(result_type, id, ops[3], ops[4], "quad_broadcast");
		break;

	default:
		SPIRV_CROSS_THROW("Invalid opcode for subgroup.");
	}

	register_control_dependent_expression(id);
}

string CompilerMSL::bitcast_glsl_op(const SPIRType &out_type, const SPIRType &in_type)
{
	if (out_type.basetype == in_type.basetype)
		return "";

	assert(out_type.basetype != SPIRType::Boolean);
	assert(in_type.basetype != SPIRType::Boolean);

	bool integral_cast = type_is_integral(out_type) && type_is_integral(in_type);
	bool same_size_cast = out_type.width == in_type.width;

	if (integral_cast && same_size_cast)
	{
		// Trivial bitcast case, casts between integers.
		return type_to_glsl(out_type);
	}
	else
	{
		// Fall back to the catch-all bitcast in MSL.
		return "as_type<" + type_to_glsl(out_type) + ">";
	}
}
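
// For example, bitcasting uint -> int is emitted as a plain int(x) functional cast,
// while uint -> float requires the catch-all form as_type<float>(x).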

// Returns an MSL string identifying the name of a SPIR-V builtin.
// Output builtins are qualified with the name of the stage out structure.
string CompilerMSL::builtin_to_glsl(BuiltIn builtin, StorageClass storage)
{
	switch (builtin)
	{
	// Override GLSL compiler strictness
	case BuiltInVertexId:
		return "gl_VertexID";
	case BuiltInInstanceId:
		return "gl_InstanceID";
	case BuiltInVertexIndex:
		return "gl_VertexIndex";
	case BuiltInInstanceIndex:
		return "gl_InstanceIndex";
	case BuiltInBaseVertex:
		return "gl_BaseVertex";
	case BuiltInBaseInstance:
		return "gl_BaseInstance";
	case BuiltInDrawIndex:
		SPIRV_CROSS_THROW("DrawIndex is not supported in MSL.");

	// When used in the entry function, output builtins are qualified with output struct name.
	// Test storage class as NOT Input, as output builtins might be part of generic type.
	// Also don't do this for tessellation control shaders.
	case BuiltInViewportIndex:
		if (!msl_options.supports_msl_version(2, 0))
			SPIRV_CROSS_THROW("ViewportIndex requires Metal 2.0.");
		/* fallthrough */
	case BuiltInPosition:
	case BuiltInPointSize:
	case BuiltInClipDistance:
	case BuiltInCullDistance:
	case BuiltInLayer:
	case BuiltInFragDepth:
	case BuiltInFragStencilRefEXT:
	case BuiltInSampleMask:
		if (get_execution_model() == ExecutionModelTessellationControl)
			break;
		if (storage != StorageClassInput && current_function && (current_function->self == ir.default_entry_point))
			return stage_out_var_name + "." + CompilerGLSL::builtin_to_glsl(builtin, storage);

		break;

	case BuiltInBaryCoordNV:
	case BuiltInBaryCoordNoPerspNV:
		if (storage == StorageClassInput && current_function && (current_function->self == ir.default_entry_point))
			return stage_in_var_name + "." + CompilerGLSL::builtin_to_glsl(builtin, storage);
		break;

	case BuiltInTessLevelOuter:
		if (get_execution_model() == ExecutionModelTessellationEvaluation)
		{
			if (storage != StorageClassOutput && !get_entry_point().flags.get(ExecutionModeTriangles) &&
			    current_function && (current_function->self == ir.default_entry_point))
				return join(patch_stage_in_var_name, ".", CompilerGLSL::builtin_to_glsl(builtin, storage));
			else
				break;
		}
		if (storage != StorageClassInput && current_function && (current_function->self == ir.default_entry_point))
			return join(tess_factor_buffer_var_name, "[", to_expression(builtin_primitive_id_id),
			            "].edgeTessellationFactor");
		break;

	case BuiltInTessLevelInner:
		if (get_execution_model() == ExecutionModelTessellationEvaluation)
		{
			if (storage != StorageClassOutput && !get_entry_point().flags.get(ExecutionModeTriangles) &&
			    current_function && (current_function->self == ir.default_entry_point))
				return join(patch_stage_in_var_name, ".", CompilerGLSL::builtin_to_glsl(builtin, storage));
			else
				break;
		}
		if (storage != StorageClassInput && current_function && (current_function->self == ir.default_entry_point))
			return join(tess_factor_buffer_var_name, "[", to_expression(builtin_primitive_id_id),
			            "].insideTessellationFactor");
		break;

	default:
		break;
	}

	return CompilerGLSL::builtin_to_glsl(builtin, storage);
}
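
// For example, in a vertex entry function, BuiltInPosition resolves to
// "out.gl_Position" when the stage out struct uses its default name "out".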

// Returns an MSL string attribute qualifier for a SPIR-V builtin
string CompilerMSL::builtin_qualifier(BuiltIn builtin)
{
	auto &execution = get_entry_point();

	switch (builtin)
	{
	// Vertex function in
	case BuiltInVertexId:
		return "vertex_id";
	case BuiltInVertexIndex:
		return "vertex_id";
	case BuiltInBaseVertex:
		return "base_vertex";
	case BuiltInInstanceId:
		return "instance_id";
	case BuiltInInstanceIndex:
		return "instance_id";
	case BuiltInBaseInstance:
		return "base_instance";
	case BuiltInDrawIndex:
		SPIRV_CROSS_THROW("DrawIndex is not supported in MSL.");

	// Vertex function out
	case BuiltInClipDistance:
		return "clip_distance";
	case BuiltInPointSize:
		return "point_size";
	case BuiltInPosition:
		if (position_invariant)
		{
			if (!msl_options.supports_msl_version(2, 1))
				SPIRV_CROSS_THROW("Invariant position is only supported on MSL 2.1 and up.");
			return "position, invariant";
		}
		else
			return "position";
	case BuiltInLayer:
		return "render_target_array_index";
	case BuiltInViewportIndex:
		if (!msl_options.supports_msl_version(2, 0))
			SPIRV_CROSS_THROW("ViewportIndex requires Metal 2.0.");
		return "viewport_array_index";

	// Tess. control function in
	case BuiltInInvocationId:
		return "thread_index_in_threadgroup";
	case BuiltInPatchVertices:
		// Shouldn't be reached.
		SPIRV_CROSS_THROW("PatchVertices is derived from the auxiliary buffer in MSL.");
	case BuiltInPrimitiveId:
		switch (execution.model)
		{
		case ExecutionModelTessellationControl:
			return "threadgroup_position_in_grid";
		case ExecutionModelTessellationEvaluation:
			return "patch_id";
		case ExecutionModelFragment:
			if (msl_options.is_ios())
				SPIRV_CROSS_THROW("PrimitiveId is not supported in fragment on iOS.");
			else if (msl_options.is_macos() && !msl_options.supports_msl_version(2, 2))
				SPIRV_CROSS_THROW("PrimitiveId on macOS requires MSL 2.2.");
			return "primitive_id";
		default:
			SPIRV_CROSS_THROW("PrimitiveId is not supported in this execution model.");
		}

	// Tess. control function out
	case BuiltInTessLevelOuter:
	case BuiltInTessLevelInner:
		// Shouldn't be reached.
		SPIRV_CROSS_THROW("Tessellation levels are handled specially in MSL.");

	// Tess. evaluation function in
	case BuiltInTessCoord:
		return "position_in_patch";

	// Fragment function in
	case BuiltInFrontFacing:
		return "front_facing";
	case BuiltInPointCoord:
		return "point_coord";
	case BuiltInFragCoord:
		return "position";
	case BuiltInSampleId:
		return "sample_id";
	case BuiltInSampleMask:
		return "sample_mask";
	case BuiltInSamplePosition:
		// Shouldn't be reached.
		SPIRV_CROSS_THROW("Sample position is retrieved by a function in MSL.");

	// Fragment function out
	case BuiltInFragDepth:
		if (execution.flags.get(ExecutionModeDepthGreater))
			return "depth(greater)";
		else if (execution.flags.get(ExecutionModeDepthLess))
			return "depth(less)";
		else
			return "depth(any)";

	case BuiltInFragStencilRefEXT:
		return "stencil";

	// Compute function in
	case BuiltInGlobalInvocationId:
		return "thread_position_in_grid";

	case BuiltInWorkgroupId:
		return "threadgroup_position_in_grid";

	case BuiltInNumWorkgroups:
		return "threadgroups_per_grid";

	case BuiltInLocalInvocationId:
		return "thread_position_in_threadgroup";

	case BuiltInLocalInvocationIndex:
		return "thread_index_in_threadgroup";

	case BuiltInSubgroupSize:
		return "thread_execution_width";

	case BuiltInNumSubgroups:
		if (!msl_options.supports_msl_version(2))
			SPIRV_CROSS_THROW("Subgroup builtins require Metal 2.0.");
		return msl_options.is_ios() ? "quadgroups_per_threadgroup" : "simdgroups_per_threadgroup";

	case BuiltInSubgroupId:
		if (!msl_options.supports_msl_version(2))
			SPIRV_CROSS_THROW("Subgroup builtins require Metal 2.0.");
		return msl_options.is_ios() ? "quadgroup_index_in_threadgroup" : "simdgroup_index_in_threadgroup";

	case BuiltInSubgroupLocalInvocationId:
		if (!msl_options.supports_msl_version(2))
			SPIRV_CROSS_THROW("Subgroup builtins require Metal 2.0.");
		return msl_options.is_ios() ? "thread_index_in_quadgroup" : "thread_index_in_simdgroup";

	case BuiltInSubgroupEqMask:
	case BuiltInSubgroupGeMask:
	case BuiltInSubgroupGtMask:
	case BuiltInSubgroupLeMask:
	case BuiltInSubgroupLtMask:
		// Shouldn't be reached.
		SPIRV_CROSS_THROW("Subgroup ballot masks are handled specially in MSL.");

	case BuiltInBaryCoordNV:
		// TODO: AMD barycentrics as well? Seem to have different swizzle and 2 components rather than 3.
		if (msl_options.is_ios())
			SPIRV_CROSS_THROW("Barycentrics not supported on iOS.");
		else if (!msl_options.supports_msl_version(2, 2))
			SPIRV_CROSS_THROW("Barycentrics are only supported in MSL 2.2 and above on macOS.");
		return "barycentric_coord, center_perspective";

	case BuiltInBaryCoordNoPerspNV:
		// TODO: AMD barycentrics as well? Seem to have different swizzle and 2 components rather than 3.
		if (msl_options.is_ios())
			SPIRV_CROSS_THROW("Barycentrics not supported on iOS.");
		else if (!msl_options.supports_msl_version(2, 2))
			SPIRV_CROSS_THROW("Barycentrics are only supported in MSL 2.2 and above on macOS.");
		return "barycentric_coord, center_no_perspective";

	default:
		return "unsupported-built-in";
	}
}
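
// For example, combined with builtin_type_decl() below, a fragment input for
// BuiltInFragCoord is declared roughly as: float4 gl_FragCoord [[position]].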

// Returns an MSL string type declaration for a SPIR-V builtin
string CompilerMSL::builtin_type_decl(BuiltIn builtin, uint32_t id)
{
	const SPIREntryPoint &execution = get_entry_point();
	switch (builtin)
	{
	// Vertex function in
	case BuiltInVertexId:
		return "uint";
	case BuiltInVertexIndex:
		return "uint";
	case BuiltInBaseVertex:
		return "uint";
	case BuiltInInstanceId:
		return "uint";
	case BuiltInInstanceIndex:
		return "uint";
	case BuiltInBaseInstance:
		return "uint";
	case BuiltInDrawIndex:
		SPIRV_CROSS_THROW("DrawIndex is not supported in MSL.");

	// Vertex function out
	case BuiltInClipDistance:
		return "float";
	case BuiltInPointSize:
		return "float";
	case BuiltInPosition:
		return "float4";
	case BuiltInLayer:
		return "uint";
	case BuiltInViewportIndex:
		if (!msl_options.supports_msl_version(2, 0))
			SPIRV_CROSS_THROW("ViewportIndex requires Metal 2.0.");
		return "uint";

	// Tess. control function in
	case BuiltInInvocationId:
		return "uint";
	case BuiltInPatchVertices:
		return "uint";
	case BuiltInPrimitiveId:
		return "uint";

	// Tess. control function out
	case BuiltInTessLevelInner:
		if (execution.model == ExecutionModelTessellationEvaluation)
			return !execution.flags.get(ExecutionModeTriangles) ? "float2" : "float";
		return "half";
	case BuiltInTessLevelOuter:
		if (execution.model == ExecutionModelTessellationEvaluation)
			return !execution.flags.get(ExecutionModeTriangles) ? "float4" : "float";
		return "half";

	// Tess. evaluation function in
	case BuiltInTessCoord:
		return execution.flags.get(ExecutionModeTriangles) ? "float3" : "float2";

	// Fragment function in
	case BuiltInFrontFacing:
		return "bool";
	case BuiltInPointCoord:
		return "float2";
	case BuiltInFragCoord:
		return "float4";
	case BuiltInSampleId:
		return "uint";
	case BuiltInSampleMask:
		return "uint";
	case BuiltInSamplePosition:
		return "float2";

	// Fragment function out
	case BuiltInFragDepth:
		return "float";

	case BuiltInFragStencilRefEXT:
		return "uint";

	// Compute function in
	case BuiltInGlobalInvocationId:
	case BuiltInLocalInvocationId:
	case BuiltInNumWorkgroups:
	case BuiltInWorkgroupId:
		return "uint3";
	case BuiltInLocalInvocationIndex:
	case BuiltInNumSubgroups:
	case BuiltInSubgroupId:
	case BuiltInSubgroupSize:
	case BuiltInSubgroupLocalInvocationId:
		return "uint";
	case BuiltInSubgroupEqMask:
	case BuiltInSubgroupGeMask:
	case BuiltInSubgroupGtMask:
	case BuiltInSubgroupLeMask:
	case BuiltInSubgroupLtMask:
		return "uint4";

	case BuiltInHelperInvocation:
		return "bool";

	case BuiltInBaryCoordNV:
	case BuiltInBaryCoordNoPerspNV:
		// Use the type as declared, can be 1, 2 or 3 components.
		return type_to_glsl(get_variable_data_type(get<SPIRVariable>(id)));

	default:
		return "unsupported-built-in-type";
	}
}

// Returns the declaration of a built-in argument to a function
string CompilerMSL::built_in_func_arg(BuiltIn builtin, bool prefix_comma)
{
	string bi_arg;
	if (prefix_comma)
		bi_arg += ", ";

	bi_arg += builtin_type_decl(builtin);
	bi_arg += " " + builtin_to_glsl(builtin, StorageClassInput);
	bi_arg += " [[" + builtin_qualifier(builtin) + "]]";

	return bi_arg;
}
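
// For example, built_in_func_arg(BuiltInSampleId, true) yields
// ", uint gl_SampleID [[sample_id]]".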

// Returns the byte size of a struct member.
size_t CompilerMSL::get_declared_struct_member_size(const SPIRType &struct_type, uint32_t index) const
{
	auto &type = get<SPIRType>(struct_type.member_types[index]);

	switch (type.basetype)
	{
	case SPIRType::Unknown:
	case SPIRType::Void:
	case SPIRType::AtomicCounter:
	case SPIRType::Image:
	case SPIRType::SampledImage:
	case SPIRType::Sampler:
		SPIRV_CROSS_THROW("Querying size of opaque object.");

	default:
	{
		// For arrays, we can use ArrayStride to get an easy check.
		// Runtime arrays will have zero size so force to min of one.
		if (!type.array.empty())
		{
			uint32_t array_size = to_array_size_literal(type);
			return type_struct_member_array_stride(struct_type, index) * max(array_size, 1u);
		}

		if (type.basetype == SPIRType::Struct)
		{
			// The size of a struct in Metal is aligned up to its natural alignment.
			auto size = get_declared_struct_size(type);
			auto alignment = get_declared_struct_member_alignment(struct_type, index);
			return (size + alignment - 1) & ~(alignment - 1);
		}

		uint32_t component_size = type.width / 8;
		uint32_t vecsize = type.vecsize;
		uint32_t columns = type.columns;

		// An unpacked 3-element vector or matrix column is the same memory size as a 4-element.
		if (vecsize == 3 && !has_extended_member_decoration(struct_type.self, index, SPIRVCrossDecorationPacked))
			vecsize = 4;

		return component_size * vecsize * columns;
	}
	}
}
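
// For example, an unpacked float3 member is reported as 4 * 4 = 16 bytes, while
// the same member decorated as packed occupies 4 * 3 = 12 bytes.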

// Returns the byte alignment of a struct member.
size_t CompilerMSL::get_declared_struct_member_alignment(const SPIRType &struct_type, uint32_t index) const
{
	auto &type = get<SPIRType>(struct_type.member_types[index]);

	switch (type.basetype)
	{
	case SPIRType::Unknown:
	case SPIRType::Void:
	case SPIRType::AtomicCounter:
	case SPIRType::Image:
	case SPIRType::SampledImage:
	case SPIRType::Sampler:
		SPIRV_CROSS_THROW("Querying alignment of opaque object.");

	case SPIRType::Int64:
		SPIRV_CROSS_THROW("long types are not supported in buffers in MSL.");
	case SPIRType::UInt64:
		SPIRV_CROSS_THROW("ulong types are not supported in buffers in MSL.");
	case SPIRType::Double:
		SPIRV_CROSS_THROW("double types are not supported in buffers in MSL.");

	case SPIRType::Struct:
	{
		// In MSL, a struct's alignment is equal to the maximum alignment of any of its members.
		uint32_t alignment = 1;
		for (uint32_t i = 0; i < type.member_types.size(); i++)
			alignment = max(alignment, uint32_t(get_declared_struct_member_alignment(type, i)));
		return alignment;
	}

	default:
	{
		// Alignment of packed type is the same as the underlying component or column size.
		// Alignment of unpacked type is the same as the vector size.
		// Alignment of a 3-element vector is the same as a 4-element (including packed using column).
		if (member_is_packed_type(struct_type, index))
		{
			// This is getting pretty complicated.
			// The special case of array of float/float2 needs to be handled here.
			uint32_t packed_type_id =
			    get_extended_member_decoration(struct_type.self, index, SPIRVCrossDecorationPackedType);
			const SPIRType *packed_type = packed_type_id != 0 ? &get<SPIRType>(packed_type_id) : nullptr;
			if (packed_type && is_array(*packed_type) && !is_matrix(*packed_type) &&
			    packed_type->basetype != SPIRType::Struct)
				return (packed_type->width / 8) * 4;
			else
				return (type.width / 8) * (type.columns == 3 ? 4 : type.columns);
		}
		else
			return (type.width / 8) * (type.vecsize == 3 ? 4 : type.vecsize);
	}
	}
}
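
// For example, an unpacked float3 member is aligned like a float4 (16 bytes),
// whereas a packed_float3 only requires the 4-byte alignment of its components.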

bool CompilerMSL::skip_argument(uint32_t) const
{
	return false;
}

void CompilerMSL::analyze_sampled_image_usage()
{
	if (msl_options.swizzle_texture_samples)
	{
		SampledImageScanner scanner(*this);
		traverse_all_reachable_opcodes(get<SPIRFunction>(ir.default_entry_point), scanner);
	}
}

bool CompilerMSL::SampledImageScanner::handle(spv::Op opcode, const uint32_t *args, uint32_t length)
{
	switch (opcode)
	{
	case OpLoad:
	case OpImage:
	case OpSampledImage:
	{
		if (length < 3)
			return false;

		uint32_t result_type = args[0];
		auto &type = compiler.get<SPIRType>(result_type);
		if ((type.basetype != SPIRType::Image && type.basetype != SPIRType::SampledImage) || type.image.sampled != 1)
			return true;

		uint32_t id = args[1];
		compiler.set<SPIRExpression>(id, "", result_type, true);
		break;
	}
	case OpImageSampleExplicitLod:
	case OpImageSampleProjExplicitLod:
	case OpImageSampleDrefExplicitLod:
	case OpImageSampleProjDrefExplicitLod:
	case OpImageSampleImplicitLod:
	case OpImageSampleProjImplicitLod:
	case OpImageSampleDrefImplicitLod:
	case OpImageSampleProjDrefImplicitLod:
	case OpImageFetch:
	case OpImageGather:
	case OpImageDrefGather:
		compiler.has_sampled_images =
		    compiler.has_sampled_images || compiler.is_sampled_image_type(compiler.expression_type(args[2]));
		compiler.needs_swizzle_buffer_def = compiler.needs_swizzle_buffer_def || compiler.has_sampled_images;
		break;
	default:
		break;
	}
	return true;
}

bool CompilerMSL::OpCodePreprocessor::handle(Op opcode, const uint32_t *args, uint32_t length)
{
	// Since MSL exists in a single execution scope, function prototype declarations are not
	// needed, and clutter the output. If secondary functions are output (either as a SPIR-V
	// function implementation or as indicated by the presence of OpFunctionCall), then set
	// suppress_missing_prototypes to suppress compiler warnings of missing function prototypes.

	// Mark if the input requires the implementation of a SPIR-V function that does not exist in Metal.
	SPVFuncImpl spv_func = get_spv_func_impl(opcode, args);
	if (spv_func != SPVFuncImplNone)
	{
		compiler.spv_function_implementations.insert(spv_func);
		suppress_missing_prototypes = true;
	}

	switch (opcode)
	{
	case OpFunctionCall:
		suppress_missing_prototypes = true;
		break;

	case OpImageWrite:
		uses_resource_write = true;
		break;

	case OpStore:
		check_resource_write(args[0]);
		break;

	case OpAtomicExchange:
	case OpAtomicCompareExchange:
	case OpAtomicCompareExchangeWeak:
	case OpAtomicIIncrement:
	case OpAtomicIDecrement:
	case OpAtomicIAdd:
	case OpAtomicISub:
	case OpAtomicSMin:
	case OpAtomicUMin:
	case OpAtomicSMax:
	case OpAtomicUMax:
	case OpAtomicAnd:
	case OpAtomicOr:
	case OpAtomicXor:
		uses_atomics = true;
		check_resource_write(args[2]);
		break;

	case OpAtomicLoad:
		uses_atomics = true;
		break;

	case OpGroupNonUniformInverseBallot:
		needs_subgroup_invocation_id = true;
		break;

	case OpGroupNonUniformBallotBitCount:
		if (args[3] != GroupOperationReduce)
			needs_subgroup_invocation_id = true;
		break;

	case OpArrayLength:
	{
		auto *var = compiler.maybe_get_backing_variable(args[2]);
		if (var)
			compiler.buffers_requiring_array_length.insert(var->self);
		break;
	}

	case OpInBoundsAccessChain:
	case OpAccessChain:
	case OpPtrAccessChain:
	{
		// OpArrayLength might want to know if taking ArrayLength of an array of SSBOs.
		uint32_t result_type = args[0];
		uint32_t id = args[1];
		uint32_t ptr = args[2];
		compiler.set<SPIRExpression>(id, "", result_type, true);
		compiler.register_read(id, ptr, true);
		compiler.ir.ids[id].set_allow_type_rewrite();
		break;
	}

	default:
		break;
	}

	// If it has one, keep track of the instruction's result type, mapped by ID
	uint32_t result_type, result_id;
	if (compiler.instruction_to_result_type(result_type, result_id, opcode, args, length))
		result_types[result_id] = result_type;

	return true;
}

// If the variable is a Uniform or StorageBuffer, mark that a resource has been written to.
void CompilerMSL::OpCodePreprocessor::check_resource_write(uint32_t var_id)
{
	auto *p_var = compiler.maybe_get_backing_variable(var_id);
	StorageClass sc = p_var ? p_var->storage : StorageClassMax;
	if (sc == StorageClassUniform || sc == StorageClassStorageBuffer)
		uses_resource_write = true;
}

// Returns an enumeration of a SPIR-V function that needs to be output for certain Op codes.
CompilerMSL::SPVFuncImpl CompilerMSL::OpCodePreprocessor::get_spv_func_impl(Op opcode, const uint32_t *args)
{
	switch (opcode)
	{
	case OpFMod:
		return SPVFuncImplMod;

	case OpFunctionCall:
	{
		auto &return_type = compiler.get<SPIRType>(args[0]);
		if (return_type.array.size() > 1)
		{
			if (return_type.array.size() > SPVFuncImplArrayCopyMultidimMax)
				SPIRV_CROSS_THROW("Cannot support this many dimensions for arrays of arrays.");
			return static_cast<SPVFuncImpl>(SPVFuncImplArrayCopyMultidimBase + return_type.array.size());
		}
		else if (return_type.array.size() > 0)
			return SPVFuncImplArrayCopy;

		break;
	}

	case OpStore:
	{
		// Get the result type of the RHS. Since this is run as a pre-processing stage,
		// we must extract the result type directly from the Instruction, rather than the ID.
		uint32_t id_lhs = args[0];
		uint32_t id_rhs = args[1];

		const SPIRType *type = nullptr;
		if (compiler.ir.ids[id_rhs].get_type() != TypeNone)
		{
			// Could be a constant, or similar.
			type = &compiler.expression_type(id_rhs);
		}
		else
		{
			// Or ... an expression.
			uint32_t tid = result_types[id_rhs];
			if (tid)
				type = &compiler.get<SPIRType>(tid);
		}

		auto *var = compiler.maybe_get<SPIRVariable>(id_lhs);

		// Are we simply assigning to a statically assigned variable which takes a constant?
		// Don't bother emitting this function.
		bool static_expression_lhs =
		    var && var->storage == StorageClassFunction && var->statically_assigned && var->remapped_variable;
		if (type && compiler.is_array(*type) && !static_expression_lhs)
		{
			if (type->array.size() > 1)
			{
				if (type->array.size() > SPVFuncImplArrayCopyMultidimMax)
					SPIRV_CROSS_THROW("Cannot support this many dimensions for arrays of arrays.");
				return static_cast<SPVFuncImpl>(SPVFuncImplArrayCopyMultidimBase + type->array.size());
			}
			else
				return SPVFuncImplArrayCopy;
		}

		break;
	}

	case OpImageFetch:
	case OpImageRead:
	case OpImageWrite:
	{
		// Retrieve the image type, and if it's a Buffer, emit a texel coordinate function
		uint32_t tid = result_types[args[opcode == OpImageWrite ? 0 : 2]];
		if (tid && compiler.get<SPIRType>(tid).image.dim == DimBuffer && !compiler.msl_options.texture_buffer_native)
			return SPVFuncImplTexelBufferCoords;

		if (opcode == OpImageFetch && compiler.msl_options.swizzle_texture_samples)
			return SPVFuncImplTextureSwizzle;

		break;
	}

	case OpImageSampleExplicitLod:
	case OpImageSampleProjExplicitLod:
	case OpImageSampleDrefExplicitLod:
	case OpImageSampleProjDrefExplicitLod:
	case OpImageSampleImplicitLod:
	case OpImageSampleProjImplicitLod:
	case OpImageSampleDrefImplicitLod:
	case OpImageSampleProjDrefImplicitLod:
	case OpImageGather:
	case OpImageDrefGather:
		if (compiler.msl_options.swizzle_texture_samples)
			return SPVFuncImplTextureSwizzle;
		break;

	case OpCompositeConstruct:
	{
		auto &type = compiler.get<SPIRType>(args[0]);
		if (type.array.size() > 1) // We need to use copies to build the composite.
			return static_cast<SPVFuncImpl>(SPVFuncImplArrayCopyMultidimBase + type.array.size() - 1);
		break;
	}

	case OpExtInst:
	{
		uint32_t extension_set = args[2];
		if (compiler.get<SPIRExtension>(extension_set).ext == SPIRExtension::GLSL)
		{
			GLSLstd450 op_450 = static_cast<GLSLstd450>(args[3]);
			switch (op_450)
			{
			case GLSLstd450Radians:
				return SPVFuncImplRadians;
			case GLSLstd450Degrees:
				return SPVFuncImplDegrees;
			case GLSLstd450FindILsb:
				return SPVFuncImplFindILsb;
			case GLSLstd450FindSMsb:
				return SPVFuncImplFindSMsb;
			case GLSLstd450FindUMsb:
				return SPVFuncImplFindUMsb;
			case GLSLstd450SSign:
				return SPVFuncImplSSign;
			case GLSLstd450MatrixInverse:
			{
				auto &mat_type = compiler.get<SPIRType>(args[0]);
				switch (mat_type.columns)
				{
				case 2:
					return SPVFuncImplInverse2x2;
				case 3:
					return SPVFuncImplInverse3x3;
				case 4:
					return SPVFuncImplInverse4x4;
				default:
					break;
				}
				break;
			}
			default:
				break;
			}
		}
		break;
	}

	case OpGroupNonUniformBallot:
		return SPVFuncImplSubgroupBallot;

	case OpGroupNonUniformInverseBallot:
	case OpGroupNonUniformBallotBitExtract:
		return SPVFuncImplSubgroupBallotBitExtract;

	case OpGroupNonUniformBallotFindLSB:
		return SPVFuncImplSubgroupBallotFindLSB;

	case OpGroupNonUniformBallotFindMSB:
		return SPVFuncImplSubgroupBallotFindMSB;

	case OpGroupNonUniformBallotBitCount:
		return SPVFuncImplSubgroupBallotBitCount;

	case OpGroupNonUniformAllEqual:
		return SPVFuncImplSubgroupAllEqual;

	default:
		break;
	}
	return SPVFuncImplNone;
}
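
// For example, an OpStore that copies a one-dimensional array returns
// SPVFuncImplArrayCopy, which triggers emission of the spvArrayCopy helper.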

// Sort both type and meta member content based on builtin status (put builtins at end),
// then by the required sorting aspect.
void CompilerMSL::MemberSorter::sort()
{
	// Create a temporary array of consecutive member indices and sort it based on how
	// the members should be reordered, based on builtin and sorting aspect meta info.
	size_t mbr_cnt = type.member_types.size();
	SmallVector<uint32_t> mbr_idxs(mbr_cnt);
	iota(mbr_idxs.begin(), mbr_idxs.end(), 0); // Fill with consecutive indices
	std::sort(mbr_idxs.begin(), mbr_idxs.end(), *this); // Sort member indices based on sorting aspect

	// Move type and meta member info to the order defined by the sorted member indices.
	// This is done by creating temporary copies of both member types and meta, and then
	// copying back to the original content at the sorted indices.
	auto mbr_types_cpy = type.member_types;
	auto mbr_meta_cpy = meta.members;
	for (uint32_t mbr_idx = 0; mbr_idx < mbr_cnt; mbr_idx++)
	{
		type.member_types[mbr_idx] = mbr_types_cpy[mbr_idxs[mbr_idx]];
		meta.members[mbr_idx] = mbr_meta_cpy[mbr_idxs[mbr_idx]];
	}
}

// Sort first by builtin status (put builtins at end), then by the sorting aspect.
bool CompilerMSL::MemberSorter::operator()(uint32_t mbr_idx1, uint32_t mbr_idx2)
{
	auto &mbr_meta1 = meta.members[mbr_idx1];
	auto &mbr_meta2 = meta.members[mbr_idx2];
	if (mbr_meta1.builtin != mbr_meta2.builtin)
		return mbr_meta2.builtin;
	else
		switch (sort_aspect)
		{
		case Location:
			return mbr_meta1.location < mbr_meta2.location;
		case LocationReverse:
			return mbr_meta1.location > mbr_meta2.location;
		case Offset:
			return mbr_meta1.offset < mbr_meta2.offset;
		case OffsetThenLocationReverse:
			return (mbr_meta1.offset < mbr_meta2.offset) ||
			       ((mbr_meta1.offset == mbr_meta2.offset) && (mbr_meta1.location > mbr_meta2.location));
		case Alphabetical:
			return mbr_meta1.alias < mbr_meta2.alias;
		default:
			return false;
		}
}

CompilerMSL::MemberSorter::MemberSorter(SPIRType &t, Meta &m, SortAspect sa)
    : type(t)
    , meta(m)
    , sort_aspect(sa)
{
	// Ensure enough meta info is available
	meta.members.resize(max(type.member_types.size(), meta.members.size()));
}

void CompilerMSL::remap_constexpr_sampler(uint32_t id, const MSLConstexprSampler &sampler)
{
	auto &type = get<SPIRType>(get<SPIRVariable>(id).basetype);
	if (type.basetype != SPIRType::SampledImage && type.basetype != SPIRType::Sampler)
		SPIRV_CROSS_THROW("Can only remap SampledImage and Sampler type.");
	if (!type.array.empty())
		SPIRV_CROSS_THROW("Can not remap array of samplers.");
	constexpr_samplers_by_id[id] = sampler;
}

void CompilerMSL::remap_constexpr_sampler_by_binding(uint32_t desc_set, uint32_t binding, const MSLConstexprSampler &sampler)
{
	constexpr_samplers_by_binding[{ desc_set, binding }] = sampler;
}

void CompilerMSL::bitcast_from_builtin_load(uint32_t source_id, std::string &expr, const SPIRType &expr_type)
{
	auto *var = maybe_get_backing_variable(source_id);
	if (var)
		source_id = var->self;

	// Only interested in standalone builtin variables.
	if (!has_decoration(source_id, DecorationBuiltIn))
		return;

	auto builtin = static_cast<BuiltIn>(get_decoration(source_id, DecorationBuiltIn));
	auto expected_type = expr_type.basetype;
	switch (builtin)
	{
	case BuiltInGlobalInvocationId:
	case BuiltInLocalInvocationId:
	case BuiltInWorkgroupId:
	case BuiltInLocalInvocationIndex:
	case BuiltInWorkgroupSize:
	case BuiltInNumWorkgroups:
	case BuiltInLayer:
	case BuiltInViewportIndex:
	case BuiltInFragStencilRefEXT:
	case BuiltInPrimitiveId:
		expected_type = SPIRType::UInt;
		break;

	case BuiltInTessLevelInner:
	case BuiltInTessLevelOuter:
		if (get_execution_model() == ExecutionModelTessellationControl)
			expected_type = SPIRType::Half;
		break;

	default:
		break;
	}

	if (expected_type != expr_type.basetype)
		expr = bitcast_expression(expr_type, expected_type, expr);

	if (builtin == BuiltInTessCoord && get_entry_point().flags.get(ExecutionModeQuads) && expr_type.vecsize == 3)
	{
		// In SPIR-V, this is always a vec3, even for quads. In Metal, though, it's a float2 for quads.
		// The code is expecting a float3, so we need to widen this.
		expr = join("float3(", expr, ", 0)");
	}
}
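
// Example of the effect (illustrative): gl_Layer is a signed int on the SPIR-V side,
// but the corresponding Metal builtin ([[render_target_array_index]]) is unsigned,
// so a load of it is rewritten into something like "as_type<int>(gl_Layer)" before
// the rest of the expression consumes it.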

void CompilerMSL::bitcast_to_builtin_store(uint32_t target_id, std::string &expr, const SPIRType &expr_type)
{
	auto *var = maybe_get_backing_variable(target_id);
	if (var)
		target_id = var->self;

	// Only interested in standalone builtin variables.
	if (!has_decoration(target_id, DecorationBuiltIn))
		return;

	auto builtin = static_cast<BuiltIn>(get_decoration(target_id, DecorationBuiltIn));
	auto expected_type = expr_type.basetype;
	switch (builtin)
	{
	case BuiltInLayer:
	case BuiltInViewportIndex:
	case BuiltInFragStencilRefEXT:
	case BuiltInPrimitiveId:
		expected_type = SPIRType::UInt;
		break;

	case BuiltInTessLevelInner:
	case BuiltInTessLevelOuter:
		expected_type = SPIRType::Half;
		break;

	default:
		break;
	}

	if (expected_type != expr_type.basetype)
	{
		if (expected_type == SPIRType::Half && expr_type.basetype == SPIRType::Float)
		{
			// These are of different widths, so we cannot do a straight bitcast.
			expr = join("half(", expr, ")");
		}
		else
		{
			auto type = expr_type;
			type.basetype = expected_type;
			expr = bitcast_expression(type, expr_type.basetype, expr);
		}
	}
}
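
// Example of the effect (illustrative): storing a float to gl_TessLevelOuter targets
// Metal's half-precision tessellation factors, so the value is narrowed with
// "half(expr)" since a same-size bitcast is impossible; same-width mismatches such
// as int -> uint for gl_Layer still go through an as_type<> bitcast.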

std::string CompilerMSL::to_initializer_expression(const SPIRVariable &var)
{
	// With MSL, we risk getting an array initializer here if we have an array.
	// FIXME: We cannot handle non-constant arrays being initialized.
	// We will need to inject spvArrayCopy here somehow ...
	auto &type = get<SPIRType>(var.basetype);
	if (ir.ids[var.initializer].get_type() == TypeConstant &&
	    (!type.array.empty() || type.basetype == SPIRType::Struct))
		return constant_expression(get<SPIRConstant>(var.initializer));
	else
		return CompilerGLSL::to_initializer_expression(var);
}
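
// Illustrative: for an array- or struct-typed variable whose initializer is a
// constant, the initializer is emitted directly via constant_expression();
// everything else defers to the GLSL base implementation.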

bool CompilerMSL::descriptor_set_is_argument_buffer(uint32_t desc_set) const
{
	if (!msl_options.argument_buffers)
		return false;
	if (desc_set >= kMaxArgumentBuffers)
		return false;

	return (argument_buffer_discrete_mask & (1u << desc_set)) == 0;
}
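
// Worked example of the mask test (illustrative): with sets 0 and 2 marked discrete,
// argument_buffer_discrete_mask == 0b101, so desc_set 1 satisfies
// (0b101 & (1u << 1)) == 0 and is encoded as an argument buffer, while sets 0 and 2 are not.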

void CompilerMSL::analyze_argument_buffers()
{
	// Gather all used resources and sort them out into argument buffers.
	// Each argument buffer corresponds to a descriptor set in SPIR-V.
	// The [[id(N)]] values used correspond to the resource mapping we have for MSL.
	// Otherwise, the binding number is used, but this is generally not safe for types like
	// combined image samplers and arrays of resources. Metal needs different indices here,
	// while SPIR-V can have one descriptor set binding. To use argument buffers in practice,
	// you will need to use the remapping from the API.
	for (auto &id : argument_buffer_ids)
		id = 0;

	// Output resources, sorted by resource index & type.
	struct Resource
	{
		SPIRVariable *var;
		string name;
		SPIRType::BaseType basetype;
		uint32_t index;
	};
	SmallVector<Resource> resources_in_set[kMaxArgumentBuffers];

	bool set_needs_swizzle_buffer[kMaxArgumentBuffers] = {};
	bool set_needs_buffer_sizes[kMaxArgumentBuffers] = {};
	bool needs_buffer_sizes = false;

	ir.for_each_typed_id<SPIRVariable>([&](uint32_t self, SPIRVariable &var) {
		if ((var.storage == StorageClassUniform || var.storage == StorageClassUniformConstant ||
		     var.storage == StorageClassStorageBuffer) &&
		    !is_hidden_variable(var))
		{
			uint32_t desc_set = get_decoration(self, DecorationDescriptorSet);
			// Ignore if the descriptor set is not being encoded as an argument buffer,
			// e.g. a discrete or push descriptor set.
			if (!descriptor_set_is_argument_buffer(desc_set))
				return;

			uint32_t var_id = var.self;
			auto &type = get_variable_data_type(var);

			if (desc_set >= kMaxArgumentBuffers)
				SPIRV_CROSS_THROW("Descriptor set index is out of range.");

			const MSLConstexprSampler *constexpr_sampler = nullptr;
			if (type.basetype == SPIRType::SampledImage || type.basetype == SPIRType::Sampler)
			{
				constexpr_sampler = find_constexpr_sampler(var_id);
				if (constexpr_sampler)
				{
					// Mark this ID as a constexpr sampler for later in case it came from set/bindings.
					constexpr_samplers_by_id[var_id] = *constexpr_sampler;
				}
			}

			if (type.basetype == SPIRType::SampledImage)
			{
				add_resource_name(var_id);

				uint32_t image_resource_index = get_metal_resource_index(var, SPIRType::Image);
				uint32_t sampler_resource_index = get_metal_resource_index(var, SPIRType::Sampler);

				// Avoid trivial conflicts where we didn't remap.
				// This will let us at least compile test cases without having to instrument remaps.
				if (sampler_resource_index == image_resource_index)
					sampler_resource_index += type.array.empty() ? 1 : to_array_size_literal(type);

				resources_in_set[desc_set].push_back({ &var, to_name(var_id), SPIRType::Image, image_resource_index });

				if (type.image.dim != DimBuffer && !constexpr_sampler)
				{
					resources_in_set[desc_set].push_back(
					    { &var, to_sampler_expression(var_id), SPIRType::Sampler, sampler_resource_index });
				}
			}
			else if (!constexpr_sampler)
			{
				// constexpr samplers are not declared as resources.
				add_resource_name(var_id);
				resources_in_set[desc_set].push_back(
				    { &var, to_name(var_id), type.basetype, get_metal_resource_index(var, type.basetype) });
			}

			// Check if this descriptor set needs a swizzle buffer.
			if (needs_swizzle_buffer_def && is_sampled_image_type(type))
				set_needs_swizzle_buffer[desc_set] = true;
			else if (buffers_requiring_array_length.count(var_id) != 0)
			{
				set_needs_buffer_sizes[desc_set] = true;
				needs_buffer_sizes = true;
			}
		}
	});

	if (needs_swizzle_buffer_def || needs_buffer_sizes)
	{
		uint32_t uint_ptr_type_id = 0;

		// We might have to add a swizzle buffer resource to the set.
		for (uint32_t desc_set = 0; desc_set < kMaxArgumentBuffers; desc_set++)
		{
			if (!set_needs_swizzle_buffer[desc_set] && !set_needs_buffer_sizes[desc_set])
				continue;

			if (uint_ptr_type_id == 0)
			{
				uint32_t offset = ir.increase_bound_by(2);
				uint32_t type_id = offset;
				uint_ptr_type_id = offset + 1;

				// Create a buffer to hold extra data, including the swizzle constants.
				SPIRType uint_type;
				uint_type.basetype = SPIRType::UInt;
				uint_type.width = 32;
				set<SPIRType>(type_id, uint_type);

				SPIRType uint_type_pointer = uint_type;
				uint_type_pointer.pointer = true;
				uint_type_pointer.pointer_depth = 1;
				uint_type_pointer.parent_type = type_id;
				uint_type_pointer.storage = StorageClassUniform;
				set<SPIRType>(uint_ptr_type_id, uint_type_pointer);
				set_decoration(uint_ptr_type_id, DecorationArrayStride, 4);
			}

			if (set_needs_swizzle_buffer[desc_set])
			{
				uint32_t var_id = ir.increase_bound_by(1);
				auto &var = set<SPIRVariable>(var_id, uint_ptr_type_id, StorageClassUniformConstant);
				set_name(var_id, "spvSwizzleConstants");
				set_decoration(var_id, DecorationDescriptorSet, desc_set);
				set_decoration(var_id, DecorationBinding, kSwizzleBufferBinding);
				resources_in_set[desc_set].push_back(
				    { &var, to_name(var_id), SPIRType::UInt, get_metal_resource_index(var, SPIRType::UInt) });
			}

			if (set_needs_buffer_sizes[desc_set])
			{
				uint32_t var_id = ir.increase_bound_by(1);
				auto &var = set<SPIRVariable>(var_id, uint_ptr_type_id, StorageClassUniformConstant);
				set_name(var_id, "spvBufferSizeConstants");
				set_decoration(var_id, DecorationDescriptorSet, desc_set);
				set_decoration(var_id, DecorationBinding, kBufferSizeBufferBinding);
				resources_in_set[desc_set].push_back(
				    { &var, to_name(var_id), SPIRType::UInt, get_metal_resource_index(var, SPIRType::UInt) });
			}
		}
	}

	for (uint32_t desc_set = 0; desc_set < kMaxArgumentBuffers; desc_set++)
	{
		auto &resources = resources_in_set[desc_set];
		if (resources.empty())
			continue;

		assert(descriptor_set_is_argument_buffer(desc_set));

		uint32_t next_id = ir.increase_bound_by(3);
		uint32_t type_id = next_id + 1;
		uint32_t ptr_type_id = next_id + 2;
		argument_buffer_ids[desc_set] = next_id;

		auto &buffer_type = set<SPIRType>(type_id);
		buffer_type.storage = StorageClassUniform;
		buffer_type.basetype = SPIRType::Struct;
		set_name(type_id, join("spvDescriptorSetBuffer", desc_set));

		auto &ptr_type = set<SPIRType>(ptr_type_id);
		ptr_type = buffer_type;
		ptr_type.pointer = true;
		ptr_type.pointer_depth = 1;
		ptr_type.parent_type = type_id;

		uint32_t buffer_variable_id = next_id;
		set<SPIRVariable>(buffer_variable_id, ptr_type_id, StorageClassUniform);
		set_name(buffer_variable_id, join("spvDescriptorSet", desc_set));

		// IDs must be emitted in ID order.
		sort(begin(resources), end(resources), [&](const Resource &lhs, const Resource &rhs) -> bool {
			return tie(lhs.index, lhs.basetype) < tie(rhs.index, rhs.basetype);
		});

		uint32_t member_index = 0;
		for (auto &resource : resources)
		{
			auto &var = *resource.var;
			auto &type = get_variable_data_type(var);
			string mbr_name = ensure_valid_name(resource.name, "m");
			set_member_name(buffer_type.self, member_index, mbr_name);

			if (resource.basetype == SPIRType::Sampler && type.basetype != SPIRType::Sampler)
			{
				// Have to synthesize a sampler type here.

				bool type_is_array = !type.array.empty();
				uint32_t sampler_type_id = ir.increase_bound_by(type_is_array ? 2 : 1);
				auto &new_sampler_type = set<SPIRType>(sampler_type_id);
				new_sampler_type.basetype = SPIRType::Sampler;
				new_sampler_type.storage = StorageClassUniformConstant;

				if (type_is_array)
				{
					uint32_t sampler_type_array_id = sampler_type_id + 1;
					auto &sampler_type_array = set<SPIRType>(sampler_type_array_id);
					sampler_type_array = new_sampler_type;
					sampler_type_array.array = type.array;
					sampler_type_array.array_size_literal = type.array_size_literal;
					sampler_type_array.parent_type = sampler_type_id;
					buffer_type.member_types.push_back(sampler_type_array_id);
				}
				else
					buffer_type.member_types.push_back(sampler_type_id);
			}
			else
			{
				if (resource.basetype == SPIRType::Image || resource.basetype == SPIRType::Sampler ||
				    resource.basetype == SPIRType::SampledImage)
				{
					// Drop pointer information when we emit the resources into a struct.
					buffer_type.member_types.push_back(get_variable_data_type_id(var));
					set_qualified_name(var.self, join(to_name(buffer_variable_id), ".", mbr_name));
				}
				else
				{
					// Resources will be declared as pointers, not references, so automatically dereference as appropriate.
					buffer_type.member_types.push_back(var.basetype);
					if (type.array.empty())
						set_qualified_name(var.self, join("(*", to_name(buffer_variable_id), ".", mbr_name, ")"));
					else
						set_qualified_name(var.self, join(to_name(buffer_variable_id), ".", mbr_name));
				}
			}

			set_extended_member_decoration(buffer_type.self, member_index, SPIRVCrossDecorationArgumentBufferID,
			                               resource.index);
			set_extended_member_decoration(buffer_type.self, member_index, SPIRVCrossDecorationInterfaceOrigID,
			                               var.self);
			member_index++;
		}
	}
}
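
// For reference (illustrative; resource names and [[id(N)]] values depend on the
// shader and on the remapping supplied through the API), the struct synthesized
// above comes out in the generated MSL roughly as:
//
//   struct spvDescriptorSetBuffer0
//   {
//       texture2d<float> uTexture [[id(0)]];
//       sampler uTextureSmplr [[id(1)]];
//       constant UBO* uUBO [[id(2)]];
//   };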
|
2019-06-10 13:41:36 +00:00
|
|
|
|
|
|
|
bool CompilerMSL::SetBindingPair::operator==(const SetBindingPair &other) const
|
|
|
|
{
|
|
|
|
return desc_set == other.desc_set && binding == other.binding;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool CompilerMSL::StageSetBinding::operator==(const StageSetBinding &other) const
|
|
|
|
{
|
|
|
|
return model == other.model && desc_set == other.desc_set && binding == other.binding;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t CompilerMSL::InternalHasher::operator()(const SetBindingPair &value) const
|
|
|
|
{
|
|
|
|
// Quality of hash doesn't really matter here.
|
|
|
|
auto hash_set = std::hash<uint32_t>()(value.desc_set);
|
|
|
|
auto hash_binding = std::hash<uint32_t>()(value.binding);
|
|
|
|
return (hash_set * 0x10001b31) ^ hash_binding;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t CompilerMSL::InternalHasher::operator()(const StageSetBinding &value) const
|
|
|
|
{
|
|
|
|
// Quality of hash doesn't really matter here.
|
|
|
|
auto hash_model = std::hash<uint32_t>()(value.model);
|
|
|
|
auto hash_set = std::hash<uint32_t>()(value.desc_set);
|
|
|
|
auto tmp_hash = (hash_model * 0x10001b31) ^ hash_set;
|
|
|
|
return (tmp_hash * 0x10001b31) ^ value.binding;
|
|
|
|
}
|
|
|
|
|
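
// These equality operators and hashers exist so the set/binding keys can serve as
// unordered_map keys, e.g. (a sketch in the spirit of constexpr_samplers_by_binding):
//
//   std::unordered_map<SetBindingPair, MSLConstexprSampler, InternalHasher> samplers;
//   samplers[{ 0, 1 }] = MSLConstexprSampler();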