/*
 * Copyright 2016-2019 The Brenwill Workshop Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef SPIRV_CROSS_MSL_HPP
#define SPIRV_CROSS_MSL_HPP

#include "spirv_glsl.hpp"
#include <map>
#include <set>
#include <stddef.h>
#include <unordered_map>
#include <unordered_set>

namespace SPIRV_CROSS_NAMESPACE
{

// Indicates the format of the vertex attribute. Currently limited to specifying
// if the attribute is an 8-bit unsigned integer, 16-bit unsigned integer, or
// some other format.
enum MSLVertexFormat
{
	MSL_VERTEX_FORMAT_OTHER = 0,
	MSL_VERTEX_FORMAT_UINT8 = 1,
	MSL_VERTEX_FORMAT_UINT16 = 2,
	MSL_VERTEX_FORMAT_INT_MAX = 0x7fffffff
};

// Defines MSL characteristics of a vertex attribute at a particular location.
// After compilation, it is possible to query whether or not this location was used.
struct MSLVertexAttr
{
	uint32_t location = 0;
	uint32_t msl_buffer = 0;
	uint32_t msl_offset = 0;
	uint32_t msl_stride = 0;
	bool per_instance = false;
	MSLVertexFormat format = MSL_VERTEX_FORMAT_OTHER;
	spv::BuiltIn builtin = spv::BuiltInMax;
};

// Matches the binding index of an MSL resource for a binding within a descriptor set.
// Taken together, the stage, desc_set and binding combine to form a reference to a resource
// descriptor used in a particular shading stage.
// If using MSL 2.0 argument buffers, and the descriptor set is not marked as a discrete descriptor set,
// the binding reference we remap to will become an [[id(N)]] attribute within
// the "descriptor set" argument buffer structure.
// For resources which are bound in the "classic" MSL 1.0 way or which are in discrete descriptor sets,
// the remap will become a [[buffer(N)]], [[texture(N)]] or [[sampler(N)]] depending on the resource types used.
struct MSLResourceBinding
{
	spv::ExecutionModel stage = spv::ExecutionModelMax;
	uint32_t desc_set = 0;
	uint32_t binding = 0;
	uint32_t msl_buffer = 0;
	uint32_t msl_texture = 0;
	uint32_t msl_sampler = 0;
};

enum MSLSamplerCoord
{
	MSL_SAMPLER_COORD_NORMALIZED = 0,
	MSL_SAMPLER_COORD_PIXEL = 1,
	MSL_SAMPLER_INT_MAX = 0x7fffffff
};

enum MSLSamplerFilter
{
	MSL_SAMPLER_FILTER_NEAREST = 0,
	MSL_SAMPLER_FILTER_LINEAR = 1,
	MSL_SAMPLER_FILTER_INT_MAX = 0x7fffffff
};

enum MSLSamplerMipFilter
{
	MSL_SAMPLER_MIP_FILTER_NONE = 0,
	MSL_SAMPLER_MIP_FILTER_NEAREST = 1,
	MSL_SAMPLER_MIP_FILTER_LINEAR = 2,
	MSL_SAMPLER_MIP_FILTER_INT_MAX = 0x7fffffff
};

enum MSLSamplerAddress
{
	MSL_SAMPLER_ADDRESS_CLAMP_TO_ZERO = 0,
	MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE = 1,
	MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER = 2,
	MSL_SAMPLER_ADDRESS_REPEAT = 3,
	MSL_SAMPLER_ADDRESS_MIRRORED_REPEAT = 4,
	MSL_SAMPLER_ADDRESS_INT_MAX = 0x7fffffff
};

enum MSLSamplerCompareFunc
{
	MSL_SAMPLER_COMPARE_FUNC_NEVER = 0,
	MSL_SAMPLER_COMPARE_FUNC_LESS = 1,
	MSL_SAMPLER_COMPARE_FUNC_LESS_EQUAL = 2,
	MSL_SAMPLER_COMPARE_FUNC_GREATER = 3,
	MSL_SAMPLER_COMPARE_FUNC_GREATER_EQUAL = 4,
	MSL_SAMPLER_COMPARE_FUNC_EQUAL = 5,
	MSL_SAMPLER_COMPARE_FUNC_NOT_EQUAL = 6,
	MSL_SAMPLER_COMPARE_FUNC_ALWAYS = 7,
	MSL_SAMPLER_COMPARE_FUNC_INT_MAX = 0x7fffffff
};

enum MSLSamplerBorderColor
{
	MSL_SAMPLER_BORDER_COLOR_TRANSPARENT_BLACK = 0,
	MSL_SAMPLER_BORDER_COLOR_OPAQUE_BLACK = 1,
	MSL_SAMPLER_BORDER_COLOR_OPAQUE_WHITE = 2,
	MSL_SAMPLER_BORDER_COLOR_INT_MAX = 0x7fffffff
};

struct MSLConstexprSampler
{
	MSLSamplerCoord coord = MSL_SAMPLER_COORD_NORMALIZED;
	MSLSamplerFilter min_filter = MSL_SAMPLER_FILTER_NEAREST;
	MSLSamplerFilter mag_filter = MSL_SAMPLER_FILTER_NEAREST;
	MSLSamplerMipFilter mip_filter = MSL_SAMPLER_MIP_FILTER_NONE;
	MSLSamplerAddress s_address = MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE;
	MSLSamplerAddress t_address = MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE;
	MSLSamplerAddress r_address = MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE;
	MSLSamplerCompareFunc compare_func = MSL_SAMPLER_COMPARE_FUNC_NEVER;
	MSLSamplerBorderColor border_color = MSL_SAMPLER_BORDER_COLOR_TRANSPARENT_BLACK;
	float lod_clamp_min = 0.0f;
	float lod_clamp_max = 1000.0f;
	int max_anisotropy = 1;

	bool compare_enable = false;
	bool lod_clamp_enable = false;
	bool anisotropy_enable = false;
};

// Special constant used in an MSLResourceBinding desc_set
// element to indicate the bindings for the push constants.
static const uint32_t kPushConstDescSet = ~(0u);

// Special constant used in an MSLResourceBinding binding
// element to indicate the bindings for the push constants.
static const uint32_t kPushConstBinding = 0;

// Special constant used in an MSLResourceBinding binding
// element to indicate the buffer binding for swizzle buffers.
static const uint32_t kSwizzleBufferBinding = ~(1u);

// Special constant used in an MSLResourceBinding binding
// element to indicate the buffer binding for buffer size buffers to support OpArrayLength.
static const uint32_t kBufferSizeBufferBinding = ~(2u);

// Special constant used in an MSLResourceBinding binding
// element to indicate the buffer binding used for the argument buffer itself.
// This buffer binding should be kept as small as possible as all automatic bindings for buffers
// will start at max(kArgumentBufferBinding) + 1.
static const uint32_t kArgumentBufferBinding = ~(3u);

static const uint32_t kMaxArgumentBuffers = 8;
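
// Illustrative sketch (not part of this header's documentation): the special constants above
// can be used to remap the push-constant block via an MSLResourceBinding. `msl` is an assumed
// CompilerMSL instance and msl_buffer = 7 is an assumed example index.
//
//     MSLResourceBinding push_binding;
//     push_binding.stage = spv::ExecutionModelVertex;
//     push_binding.desc_set = kPushConstDescSet;
//     push_binding.binding = kPushConstBinding;
//     push_binding.msl_buffer = 7;
//     msl.add_msl_resource_binding(push_binding);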

// Decompiles SPIR-V to Metal Shading Language
class CompilerMSL : public CompilerGLSL
{
public:
	// Options for compiling to Metal Shading Language
	struct Options
	{
		typedef enum
		{
			iOS = 0,
			macOS = 1
		} Platform;

		Platform platform = macOS;
		uint32_t msl_version = make_msl_version(1, 2);
		uint32_t texel_buffer_texture_width = 4096; // Width of 2D Metal textures used as 1D texel buffers
		uint32_t swizzle_buffer_index = 30;
		uint32_t indirect_params_buffer_index = 29;
		uint32_t shader_output_buffer_index = 28;
		uint32_t shader_patch_output_buffer_index = 27;
		uint32_t shader_tess_factor_buffer_index = 26;
		uint32_t buffer_size_buffer_index = 25;
		uint32_t view_mask_buffer_index = 24;
		uint32_t shader_input_wg_index = 0;
		uint32_t device_index = 0;
		bool enable_point_size_builtin = true;
		bool disable_rasterization = false;
		bool capture_output_to_buffer = false;
		bool swizzle_texture_samples = false;
		bool tess_domain_origin_lower_left = false;
		bool multiview = false;
		bool view_index_from_device_index = false;

		// Enable use of MSL 2.0 indirect argument buffers.
		// MSL 2.0 must also be enabled.
		bool argument_buffers = false;

		// Fragment output in MSL must have at least as many components as the render pass.
		// Add support to explicitly pad out components.
		bool pad_fragment_output_components = false;

		// Requires MSL 2.1; use the native support for texel buffers.
		bool texture_buffer_native = false;

		bool is_ios()
		{
			return platform == iOS;
		}

		bool is_macos()
		{
			return platform == macOS;
		}

		void set_msl_version(uint32_t major, uint32_t minor = 0, uint32_t patch = 0)
		{
			msl_version = make_msl_version(major, minor, patch);
		}

		bool supports_msl_version(uint32_t major, uint32_t minor = 0, uint32_t patch = 0)
		{
			return msl_version >= make_msl_version(major, minor, patch);
		}

		static uint32_t make_msl_version(uint32_t major, uint32_t minor = 0, uint32_t patch = 0)
		{
			return (major * 10000) + (minor * 100) + patch;
		}
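
		// Worked example of the encoding above: make_msl_version(1, 2) == 10200 and
		// make_msl_version(2, 1) == 20100, so supports_msl_version(2, 1) is true only
		// when msl_version is at least 20100.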
	};

	const Options &get_msl_options() const
	{
		return msl_options;
	}

	void set_msl_options(const Options &opts)
	{
		msl_options = opts;
	}
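
	// Illustrative usage sketch, assuming `spirv_binary` holds a SPIR-V module and that the
	// chosen options suit the target device; this workflow is not prescribed by the API itself.
	//
	//     CompilerMSL msl(std::move(spirv_binary));
	//     CompilerMSL::Options opts = msl.get_msl_options();
	//     opts.platform = CompilerMSL::Options::iOS;
	//     opts.set_msl_version(2, 1);
	//     opts.argument_buffers = true; // needs MSL 2.0 or later
	//     msl.set_msl_options(opts);
	//     std::string msl_source = msl.compile();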

	// Provide feedback to the calling API to allow the runtime to disable pipeline
	// rasterization if the vertex shader requires rasterization to be disabled.
	bool get_is_rasterization_disabled() const
	{
		return is_rasterization_disabled && (get_entry_point().model == spv::ExecutionModelVertex ||
		                                     get_entry_point().model == spv::ExecutionModelTessellationControl ||
		                                     get_entry_point().model == spv::ExecutionModelTessellationEvaluation);
	}

	// Provide feedback to the calling API to allow it to pass an auxiliary
	// swizzle buffer if the shader needs it.
	bool needs_swizzle_buffer() const
	{
		return used_swizzle_buffer;
	}

	// Provide feedback to the calling API to allow it to pass a buffer
	// containing STORAGE_BUFFER buffer sizes to support OpArrayLength.
	bool needs_buffer_size_buffer() const
	{
		return !buffers_requiring_array_length.empty();
	}

	// Provide feedback to the calling API to allow it to pass a buffer
	// containing the view mask for the current multiview subpass.
	bool needs_view_mask_buffer() const
	{
		return msl_options.multiview && !msl_options.view_index_from_device_index;
	}

	// Provide feedback to the calling API to allow it to pass an output
	// buffer if the shader needs it.
	bool needs_output_buffer() const
	{
		return capture_output_to_buffer && stage_out_var_id != 0;
	}

	// Provide feedback to the calling API to allow it to pass a patch output
	// buffer if the shader needs it.
	bool needs_patch_output_buffer() const
	{
		return capture_output_to_buffer && patch_stage_out_var_id != 0;
	}

	// Provide feedback to the calling API to allow it to pass an input threadgroup
	// buffer if the shader needs it.
	bool needs_input_threadgroup_mem() const
	{
		return capture_output_to_buffer && stage_in_var_id != 0;
	}
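
	// Illustrative sketch of the feedback loop these queries support. `bind_extra_buffer` is a
	// hypothetical helper in the calling runtime, not part of SPIRV-Cross.
	//
	//     std::string msl_source = msl.compile();
	//     if (msl.needs_swizzle_buffer())
	//         bind_extra_buffer(msl.get_msl_options().swizzle_buffer_index);
	//     if (msl.needs_buffer_size_buffer())
	//         bind_extra_buffer(msl.get_msl_options().buffer_size_buffer_index);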

	explicit CompilerMSL(std::vector<uint32_t> spirv);
	CompilerMSL(const uint32_t *ir, size_t word_count);
	explicit CompilerMSL(const ParsedIR &ir);
	explicit CompilerMSL(ParsedIR &&ir);

	// attr is a vertex attribute binding used to match
	// vertex content locations to MSL attributes. If vertex attributes are provided,
	// is_msl_vertex_attribute_used() will return true after calling ::compile() if
	// the location was used by the MSL code.
	void add_msl_vertex_attribute(const MSLVertexAttr &attr);
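
	// Illustrative sketch with assumed numbers: describe the attribute at location 1 as coming
	// from Metal vertex buffer 0 at byte offset 16 with a 32-byte stride, then check after
	// compilation whether that location was actually consumed.
	//
	//     MSLVertexAttr attr;
	//     attr.location = 1;
	//     attr.msl_buffer = 0;
	//     attr.msl_offset = 16;
	//     attr.msl_stride = 32;
	//     msl.add_msl_vertex_attribute(attr);
	//     std::string msl_source = msl.compile();
	//     bool used = msl.is_msl_vertex_attribute_used(1);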

	// resource is a resource binding to indicate the MSL buffer,
	// texture or sampler index to use for a particular SPIR-V descriptor set
	// and binding. If resource bindings are provided,
	// is_msl_resource_binding_used() will return true after calling ::compile() if
	// the set/binding combination was used by the MSL code.
	void add_msl_resource_binding(const MSLResourceBinding &resource);
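
	// Illustrative sketch with assumed set/binding/slot numbers: map descriptor set 0,
	// binding 2 in the fragment stage to Metal texture slot 1 and sampler slot 1.
	//
	//     MSLResourceBinding binding;
	//     binding.stage = spv::ExecutionModelFragment;
	//     binding.desc_set = 0;
	//     binding.binding = 2;
	//     binding.msl_texture = 1;
	//     binding.msl_sampler = 1;
	//     msl.add_msl_resource_binding(binding);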

	// When using MSL argument buffers, we can force "classic" MSL 1.0 binding schemes for certain descriptor sets.
	// This corresponds to VK_KHR_push_descriptor in Vulkan.
	void add_discrete_descriptor_set(uint32_t desc_set);
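
	// Illustrative sketch (assumed set number): with Options::argument_buffers enabled,
	// keep descriptor set 0 on classic [[buffer/texture/sampler(N)]] bindings.
	//
	//     msl.add_discrete_descriptor_set(0);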

	// Query after compilation is done. This allows you to check if a location or set/binding combination was used by the shader.
	bool is_msl_vertex_attribute_used(uint32_t location);

	// NOTE: Only resources which are remapped using add_msl_resource_binding will be reported here.
	// Constexpr samplers are always assumed to be emitted.
	// No specific MSLResourceBinding remapping is required for constexpr samplers as long as they are remapped
	// by remap_constexpr_sampler(_by_binding).
	bool is_msl_resource_binding_used(spv::ExecutionModel model, uint32_t set, uint32_t binding);

	// This must only be called after a successful call to CompilerMSL::compile().
	// For a variable resource ID obtained through the reflection API, report the automatically assigned resource index.
	// If the descriptor set was part of an argument buffer, report the [[id(N)]],
	// or the [[buffer/texture/sampler]] binding for other resources.
	// If the resource was a combined image sampler, report the image binding here;
	// use the _secondary version of this call to query the sampler half of the resource.
	// If no binding exists, uint32_t(-1) is returned.
	uint32_t get_automatic_msl_resource_binding(uint32_t id) const;

	// Same as get_automatic_msl_resource_binding, but should only be used for combined image samplers, in which case the
	// sampler's binding is returned instead. For any other resource type, -1 is returned.
	uint32_t get_automatic_msl_resource_binding_secondary(uint32_t id) const;
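
	// Illustrative sketch using the base Compiler reflection API to discover resource IDs;
	// the loop body is an assumed usage pattern, not mandated by this header.
	//
	//     std::string msl_source = msl.compile();
	//     ShaderResources resources = msl.get_shader_resources();
	//     for (const Resource &res : resources.sampled_images)
	//     {
	//         uint32_t texture_slot = msl.get_automatic_msl_resource_binding(res.id);
	//         uint32_t sampler_slot = msl.get_automatic_msl_resource_binding_secondary(res.id);
	//     }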

	// Compiles the SPIR-V code into Metal Shading Language.
	std::string compile() override;

	// Remap a sampler with ID to a constexpr sampler.
	// Older iOS targets must use constexpr samplers in certain cases (PCF),
	// so a static sampler must be used.
	// The sampler will not consume a binding, but be declared in the entry point as a constexpr sampler.
	// This can be used on either combined image/samplers (sampler2D) or standalone samplers.
	// The remapped sampler must not be an array of samplers.
	// Prefer remap_constexpr_sampler_by_binding unless you're also doing reflection anyway.
	void remap_constexpr_sampler(uint32_t id, const MSLConstexprSampler &sampler);

	// Same as remap_constexpr_sampler, except you provide set/binding, rather than variable ID.
	// Remaps based on ID take priority over set/binding remaps.
	void remap_constexpr_sampler_by_binding(uint32_t desc_set, uint32_t binding, const MSLConstexprSampler &sampler);
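
	// Illustrative sketch with assumed set/binding: declare a comparison (PCF) sampler as a
	// constexpr sampler for the shadow map at set 0, binding 3.
	//
	//     MSLConstexprSampler shadow_sampler;
	//     shadow_sampler.min_filter = MSL_SAMPLER_FILTER_LINEAR;
	//     shadow_sampler.mag_filter = MSL_SAMPLER_FILTER_LINEAR;
	//     shadow_sampler.compare_enable = true;
	//     shadow_sampler.compare_func = MSL_SAMPLER_COMPARE_FUNC_LESS_EQUAL;
	//     shadow_sampler.s_address = MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE;
	//     shadow_sampler.t_address = MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE;
	//     msl.remap_constexpr_sampler_by_binding(0, 3, shadow_sampler);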

	// If using CompilerMSL::Options::pad_fragment_output_components, override the number of components we expect
	// to use for a particular location. The default is 4 if the number of components is not overridden.
	void set_fragment_output_components(uint32_t location, uint32_t components);
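
	// Illustrative sketch with assumed numbers: if the attachment at location 0 is a
	// two-component format, tell the compiler to expect 2 components there instead of
	// the default 4 before padding fragment outputs.
	//
	//     CompilerMSL::Options opts = msl.get_msl_options();
	//     opts.pad_fragment_output_components = true;
	//     msl.set_msl_options(opts);
	//     msl.set_fragment_output_components(0, 2);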

protected:
	// An enum of SPIR-V functions that are implemented in additional
	// source code that is added to the shader if necessary.
	enum SPVFuncImpl
	{
		SPVFuncImplNone,
		SPVFuncImplMod,
		SPVFuncImplRadians,
		SPVFuncImplDegrees,
		SPVFuncImplFindILsb,
		SPVFuncImplFindSMsb,
		SPVFuncImplFindUMsb,
		SPVFuncImplSSign,
		SPVFuncImplArrayCopyMultidimBase,
		// Unfortunately, we cannot use recursive templates in the MSL compiler properly,
		// so stamp out variants up to some arbitrary maximum.
		SPVFuncImplArrayCopy = SPVFuncImplArrayCopyMultidimBase + 1,
		SPVFuncImplArrayOfArrayCopy2Dim = SPVFuncImplArrayCopyMultidimBase + 2,
		SPVFuncImplArrayOfArrayCopy3Dim = SPVFuncImplArrayCopyMultidimBase + 3,
		SPVFuncImplArrayOfArrayCopy4Dim = SPVFuncImplArrayCopyMultidimBase + 4,
		SPVFuncImplArrayOfArrayCopy5Dim = SPVFuncImplArrayCopyMultidimBase + 5,
		SPVFuncImplArrayOfArrayCopy6Dim = SPVFuncImplArrayCopyMultidimBase + 6,
		SPVFuncImplTexelBufferCoords,
		SPVFuncImplInverse4x4,
		SPVFuncImplInverse3x3,
		SPVFuncImplInverse2x2,
		SPVFuncImplTextureSwizzle,
		SPVFuncImplSubgroupBallot,
		SPVFuncImplSubgroupBallotBitExtract,
		SPVFuncImplSubgroupBallotFindLSB,
		SPVFuncImplSubgroupBallotFindMSB,
		SPVFuncImplSubgroupBallotBitCount,
		SPVFuncImplSubgroupAllEqual,
		SPVFuncImplReflectScalar,
		SPVFuncImplRefractScalar,
		SPVFuncImplFaceForwardScalar,
		SPVFuncImplArrayCopyMultidimMax = 6
	};

	void emit_binary_unord_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op);
	void emit_instruction(const Instruction &instr) override;
	void emit_glsl_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args,
	                  uint32_t count) override;
	void emit_spv_amd_shader_trinary_minmax_op(uint32_t result_type, uint32_t result_id, uint32_t op,
	                                           const uint32_t *args, uint32_t count) override;
	void emit_header() override;
	void emit_function_prototype(SPIRFunction &func, const Bitset &return_flags) override;
	void emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id) override;
	void emit_subgroup_op(const Instruction &i) override;
	void emit_fixup() override;
	std::string to_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index,
	                             const std::string &qualifier = "");
	void emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index,
	                        const std::string &qualifier = "", uint32_t base_offset = 0) override;
	void emit_struct_padding_target(const SPIRType &type) override;
	std::string type_to_glsl(const SPIRType &type, uint32_t id = 0) override;
	std::string image_type_glsl(const SPIRType &type, uint32_t id = 0) override;
	std::string sampler_type(const SPIRType &type);
	std::string builtin_to_glsl(spv::BuiltIn builtin, spv::StorageClass storage) override;
	std::string to_func_call_arg(uint32_t id) override;
	std::string to_name(uint32_t id, bool allow_alias = true) const override;
	std::string to_function_name(uint32_t img, const SPIRType &imgtype, bool is_fetch, bool is_gather, bool is_proj,
	                             bool has_array_offsets, bool has_offset, bool has_grad, bool has_dref, uint32_t lod,
	                             uint32_t minlod) override;
	std::string to_function_args(uint32_t img, const SPIRType &imgtype, bool is_fetch, bool is_gather, bool is_proj,
	                             uint32_t coord, uint32_t coord_components, uint32_t dref, uint32_t grad_x,
	                             uint32_t grad_y, uint32_t lod, uint32_t coffset, uint32_t offset, uint32_t bias,
	                             uint32_t comp, uint32_t sample, uint32_t minlod, bool *p_forward) override;
	std::string to_initializer_expression(const SPIRVariable &var) override;
	std::string unpack_expression_type(std::string expr_str, const SPIRType &type, uint32_t physical_type_id, bool is_packed, bool row_major) override;

	std::string bitcast_glsl_op(const SPIRType &result_type, const SPIRType &argument_type) override;
	bool skip_argument(uint32_t id) const override;
	std::string to_member_reference(uint32_t base, const SPIRType &type, uint32_t index, bool ptr_chain) override;
	std::string to_qualifiers_glsl(uint32_t id) override;
	void replace_illegal_names() override;
	void declare_undefined_values() override;
	void declare_constant_arrays();
	bool is_patch_block(const SPIRType &type);
	bool is_non_native_row_major_matrix(uint32_t id) override;
	bool member_is_non_native_row_major_matrix(const SPIRType &type, uint32_t index) override;
	std::string convert_row_major_matrix(std::string exp_str, const SPIRType &exp_type, uint32_t physical_type_id, bool is_packed) override;

	void preprocess_op_codes();
	void localize_global_variables();
	void extract_global_variables_from_functions();
	void mark_packable_structs();
	void mark_as_packable(SPIRType &type);

	std::unordered_map<uint32_t, std::set<uint32_t>> function_global_vars;
	void extract_global_variables_from_function(uint32_t func_id, std::set<uint32_t> &added_arg_ids,
	                                            std::unordered_set<uint32_t> &global_var_ids,
	                                            std::unordered_set<uint32_t> &processed_func_ids);
	uint32_t add_interface_block(spv::StorageClass storage, bool patch = false);
	uint32_t add_interface_block_pointer(uint32_t ib_var_id, spv::StorageClass storage);

	void add_variable_to_interface_block(spv::StorageClass storage, const std::string &ib_var_ref, SPIRType &ib_type,
	                                     SPIRVariable &var, bool strip_array);
	void add_composite_variable_to_interface_block(spv::StorageClass storage, const std::string &ib_var_ref,
	                                               SPIRType &ib_type, SPIRVariable &var, bool strip_array);
	void add_plain_variable_to_interface_block(spv::StorageClass storage, const std::string &ib_var_ref,
	                                           SPIRType &ib_type, SPIRVariable &var, bool strip_array);
	void add_plain_member_variable_to_interface_block(spv::StorageClass storage, const std::string &ib_var_ref,
	                                                  SPIRType &ib_type, SPIRVariable &var, uint32_t index,
	                                                  bool strip_array);
	void add_composite_member_variable_to_interface_block(spv::StorageClass storage, const std::string &ib_var_ref,
	                                                      SPIRType &ib_type, SPIRVariable &var, uint32_t index,
	                                                      bool strip_array);
	uint32_t get_accumulated_member_location(const SPIRVariable &var, uint32_t mbr_idx, bool strip_array);
	void add_tess_level_input_to_interface_block(const std::string &ib_var_ref, SPIRType &ib_type, SPIRVariable &var);

	void fix_up_interface_member_indices(spv::StorageClass storage, uint32_t ib_type_id);

	void mark_location_as_used_by_shader(uint32_t location, spv::StorageClass storage);
	uint32_t ensure_correct_builtin_type(uint32_t type_id, spv::BuiltIn builtin);
	uint32_t ensure_correct_attribute_type(uint32_t type_id, uint32_t location);

	void emit_custom_functions();
	void emit_resources();
	void emit_specialization_constants_and_structs();
	void emit_interface_block(uint32_t ib_var_id);
	bool maybe_emit_array_assignment(uint32_t id_lhs, uint32_t id_rhs);

	void fix_up_shader_inputs_outputs();

	std::string func_type_decl(SPIRType &type);
	std::string entry_point_args_classic(bool append_comma);
	std::string entry_point_args_argument_buffer(bool append_comma);
	std::string entry_point_arg_stage_in();
	void entry_point_args_builtin(std::string &args);
	void entry_point_args_discrete_descriptors(std::string &args);
	std::string to_qualified_member_name(const SPIRType &type, uint32_t index);
	std::string ensure_valid_name(std::string name, std::string pfx);
	std::string to_sampler_expression(uint32_t id);
	std::string to_swizzle_expression(uint32_t id);
	std::string to_buffer_size_expression(uint32_t id);
	std::string builtin_qualifier(spv::BuiltIn builtin);
	std::string builtin_type_decl(spv::BuiltIn builtin, uint32_t id = 0);
	std::string built_in_func_arg(spv::BuiltIn builtin, bool prefix_comma);
	std::string member_attribute_qualifier(const SPIRType &type, uint32_t index);
	std::string argument_decl(const SPIRFunction::Parameter &arg);
	std::string round_fp_tex_coords(std::string tex_coords, bool coord_is_fp);
	uint32_t get_metal_resource_index(SPIRVariable &var, SPIRType::BaseType basetype);
	uint32_t get_ordered_member_location(uint32_t type_id, uint32_t index, uint32_t *comp = nullptr);

	// MSL packing rules. These compute the effective packing rules as observed by the MSL compiler in the MSL output.
	// These values can change depending on various extended decorations which control packing rules.
	// We need to make these rules match up with SPIR-V declared rules.
	uint32_t get_declared_type_size_msl(const SPIRType &type, bool packed, bool row_major) const;
	uint32_t get_declared_type_array_stride_msl(const SPIRType &type, bool packed, bool row_major) const;
	uint32_t get_declared_type_matrix_stride_msl(const SPIRType &type, bool packed, bool row_major) const;
	uint32_t get_declared_type_alignment_msl(const SPIRType &type, bool packed, bool row_major) const;

	uint32_t get_declared_struct_member_size_msl(const SPIRType &struct_type, uint32_t index) const;
	uint32_t get_declared_struct_member_array_stride_msl(const SPIRType &struct_type, uint32_t index) const;
	uint32_t get_declared_struct_member_matrix_stride_msl(const SPIRType &struct_type, uint32_t index) const;
	uint32_t get_declared_struct_member_alignment_msl(const SPIRType &struct_type, uint32_t index) const;

	const SPIRType &get_physical_member_type(const SPIRType &struct_type, uint32_t index) const;

	uint32_t get_declared_struct_size_msl(const SPIRType &struct_type, bool ignore_alignment = false, bool ignore_padding = false) const;

	std::string to_component_argument(uint32_t id);
	void align_struct(SPIRType &ib_type, std::unordered_set<uint32_t> &aligned_structs);
	void mark_scalar_layout_structs(const SPIRType &ib_type);
	void ensure_member_packing_rules_msl(SPIRType &ib_type, uint32_t index);
	bool validate_member_packing_rules_msl(const SPIRType &type, uint32_t index) const;
	std::string get_argument_address_space(const SPIRVariable &argument);
	std::string get_type_address_space(const SPIRType &type, uint32_t id);
	const char *to_restrict(uint32_t id, bool space = true);
	SPIRType &get_stage_in_struct_type();
	SPIRType &get_stage_out_struct_type();
	SPIRType &get_patch_stage_in_struct_type();
	SPIRType &get_patch_stage_out_struct_type();
	std::string get_tess_factor_struct_name();
	void emit_atomic_func_op(uint32_t result_type, uint32_t result_id, const char *op, uint32_t mem_order_1,
	                         uint32_t mem_order_2, bool has_mem_order_2, uint32_t op0, uint32_t op1 = 0,
	                         bool op1_is_pointer = false, bool op1_is_literal = false, uint32_t op2 = 0);
	const char *get_memory_order(uint32_t spv_mem_sem);
	void add_pragma_line(const std::string &line);
	void add_typedef_line(const std::string &line);
	void emit_barrier(uint32_t id_exe_scope, uint32_t id_mem_scope, uint32_t id_mem_sem);
	void emit_array_copy(const std::string &lhs, uint32_t rhs_id) override;
	void build_implicit_builtins();
	uint32_t build_constant_uint_array_pointer();
	void emit_entry_point_declarations() override;
	uint32_t builtin_frag_coord_id = 0;
	uint32_t builtin_sample_id_id = 0;
	uint32_t builtin_vertex_idx_id = 0;
	uint32_t builtin_base_vertex_id = 0;
	uint32_t builtin_instance_idx_id = 0;
	uint32_t builtin_base_instance_id = 0;
	uint32_t builtin_view_idx_id = 0;
	uint32_t builtin_layer_id = 0;
	uint32_t builtin_invocation_id_id = 0;
	uint32_t builtin_primitive_id_id = 0;
	uint32_t builtin_subgroup_invocation_id_id = 0;
	uint32_t builtin_subgroup_size_id = 0;
	uint32_t swizzle_buffer_id = 0;
	uint32_t buffer_size_buffer_id = 0;
	uint32_t view_mask_buffer_id = 0;

	void bitcast_to_builtin_store(uint32_t target_id, std::string &expr, const SPIRType &expr_type) override;
	void bitcast_from_builtin_load(uint32_t source_id, std::string &expr, const SPIRType &expr_type) override;
	void emit_store_statement(uint32_t lhs_expression, uint32_t rhs_expression) override;

	void analyze_sampled_image_usage();

	bool emit_tessellation_access_chain(const uint32_t *ops, uint32_t length);
	bool is_out_of_bounds_tessellation_level(uint32_t id_lhs);

	void mark_implicit_builtin(spv::StorageClass storage, spv::BuiltIn builtin, uint32_t id);

	std::string convert_to_f32(const std::string &expr, uint32_t components);

	Options msl_options;
	std::set<SPVFuncImpl> spv_function_implementations;
	std::unordered_map<uint32_t, MSLVertexAttr> vtx_attrs_by_location;
	std::unordered_map<uint32_t, MSLVertexAttr> vtx_attrs_by_builtin;
	std::unordered_set<uint32_t> vtx_attrs_in_use;
	std::unordered_map<uint32_t, uint32_t> fragment_output_components;
	std::set<std::string> pragma_lines;
	std::set<std::string> typedef_lines;
	SmallVector<uint32_t> vars_needing_early_declaration;

	struct SetBindingPair
	{
		uint32_t desc_set;
		uint32_t binding;
		bool operator==(const SetBindingPair &other) const;
	};

	struct StageSetBinding
	{
		spv::ExecutionModel model;
		uint32_t desc_set;
		uint32_t binding;
		bool operator==(const StageSetBinding &other) const;
	};

	struct InternalHasher
	{
		size_t operator()(const SetBindingPair &value) const;
		size_t operator()(const StageSetBinding &value) const;
	};

	std::unordered_map<StageSetBinding, std::pair<MSLResourceBinding, bool>, InternalHasher> resource_bindings;

	uint32_t next_metal_resource_index_buffer = 0;
	uint32_t next_metal_resource_index_texture = 0;
	uint32_t next_metal_resource_index_sampler = 0;
	// Intentionally uninitialized, works around MSVC 2013 bug.
	uint32_t next_metal_resource_ids[kMaxArgumentBuffers];

	uint32_t stage_in_var_id = 0;
	uint32_t stage_out_var_id = 0;
	uint32_t patch_stage_in_var_id = 0;
	uint32_t patch_stage_out_var_id = 0;
	uint32_t stage_in_ptr_var_id = 0;
	uint32_t stage_out_ptr_var_id = 0;
	bool has_sampled_images = false;
	bool needs_vertex_idx_arg = false;
	bool needs_instance_idx_arg = false;
	bool is_rasterization_disabled = false;
	bool capture_output_to_buffer = false;
	bool needs_swizzle_buffer_def = false;
	bool used_swizzle_buffer = false;
	bool added_builtin_tess_level = false;
	bool needs_subgroup_invocation_id = false;
	std::string qual_pos_var_name;
	std::string stage_in_var_name = "in";
	std::string stage_out_var_name = "out";
	std::string patch_stage_in_var_name = "patchIn";
	std::string patch_stage_out_var_name = "patchOut";
	std::string sampler_name_suffix = "Smplr";
	std::string swizzle_name_suffix = "Swzl";
	std::string buffer_size_name_suffix = "BufferSize";
	std::string input_wg_var_name = "gl_in";
	std::string output_buffer_var_name = "spvOut";
	std::string patch_output_buffer_var_name = "spvPatchOut";
	std::string tess_factor_buffer_var_name = "spvTessLevel";
	spv::Op previous_instruction_opcode = spv::OpNop;

	// Must be ordered since declaration is in a specific order.
	std::map<uint32_t, MSLConstexprSampler> constexpr_samplers_by_id;
	std::unordered_map<SetBindingPair, MSLConstexprSampler, InternalHasher> constexpr_samplers_by_binding;
	const MSLConstexprSampler *find_constexpr_sampler(uint32_t id) const;

	std::unordered_set<uint32_t> buffers_requiring_array_length;
	SmallVector<uint32_t> buffer_arrays;

	uint32_t argument_buffer_ids[kMaxArgumentBuffers];
	uint32_t argument_buffer_discrete_mask = 0;
	void analyze_argument_buffers();
	bool descriptor_set_is_argument_buffer(uint32_t desc_set) const;

	uint32_t get_target_components_for_fragment_location(uint32_t location) const;
	uint32_t build_extended_vector_type(uint32_t type_id, uint32_t components);

	bool suppress_missing_prototypes = false;

	// OpcodeHandler that handles several MSL preprocessing operations.
	struct OpCodePreprocessor : OpcodeHandler
	{
		OpCodePreprocessor(CompilerMSL &compiler_)
		    : compiler(compiler_)
		{
		}

		bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override;
		CompilerMSL::SPVFuncImpl get_spv_func_impl(spv::Op opcode, const uint32_t *args);
		void check_resource_write(uint32_t var_id);

		CompilerMSL &compiler;
		std::unordered_map<uint32_t, uint32_t> result_types;
		bool suppress_missing_prototypes = false;
		bool uses_atomics = false;
		bool uses_resource_write = false;
		bool needs_subgroup_invocation_id = false;
	};

	// OpcodeHandler that scans for uses of sampled images
	struct SampledImageScanner : OpcodeHandler
	{
		SampledImageScanner(CompilerMSL &compiler_)
		    : compiler(compiler_)
		{
		}

		bool handle(spv::Op opcode, const uint32_t *args, uint32_t) override;

		CompilerMSL &compiler;
	};

	// Sorts the members of a SPIRType and associated Meta info based on a settable sorting
	// aspect, which defines which aspect of the struct members will be used to sort them.
	// Regardless of the sorting aspect, built-in members always appear at the end of the struct.
	struct MemberSorter
	{
		enum SortAspect
		{
			Location,
			LocationReverse,
			Offset,
			OffsetThenLocationReverse,
			Alphabetical
		};

		void sort();
		bool operator()(uint32_t mbr_idx1, uint32_t mbr_idx2);
		MemberSorter(SPIRType &t, Meta &m, SortAspect sa);

		SPIRType &type;
		Meta &meta;
		SortAspect sort_aspect;
	};
};
} // namespace SPIRV_CROSS_NAMESPACE

#endif