2016-03-02 17:09:16 +00:00
|
|
|
/*
|
2021-01-14 15:07:49 +00:00
|
|
|
* Copyright 2015-2021 Arm Limited
|
2021-05-08 08:47:48 +00:00
|
|
|
* SPDX-License-Identifier: Apache-2.0 OR MIT
|
2016-03-02 17:09:16 +00:00
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*/
|
|
|
|
|
2020-11-25 14:22:08 +00:00
|
|
|
/*
|
|
|
|
* At your option, you may choose to accept this material under either:
|
|
|
|
* 1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or
|
|
|
|
* 2. The MIT License, found at <http://opensource.org/licenses/MIT>.
|
|
|
|
*/
|
|
|
|
|
2016-11-11 17:04:14 +00:00
|
|
|
#ifndef SPIRV_CROSS_COMMON_HPP
|
|
|
|
#define SPIRV_CROSS_COMMON_HPP
|
2016-03-02 17:09:16 +00:00
|
|
|
|
2022-05-02 13:27:09 +00:00
|
|
|
#ifndef SPV_ENABLE_UTILITY_CODE
|
|
|
|
#define SPV_ENABLE_UTILITY_CODE
|
|
|
|
#endif
|
2017-03-25 15:28:44 +00:00
|
|
|
#include "spirv.hpp"
|
2022-05-02 13:27:09 +00:00
|
|
|
|
2019-04-09 11:07:30 +00:00
|
|
|
#include "spirv_cross_containers.hpp"
|
2019-04-09 13:10:02 +00:00
|
|
|
#include "spirv_cross_error_handling.hpp"
|
2019-09-05 10:43:40 +00:00
|
|
|
#include <functional>
|
2016-03-02 17:09:16 +00:00
|
|
|
|
2019-03-29 09:29:44 +00:00
|
|
|
// A bit crude, but allows projects which embed SPIRV-Cross statically to
|
|
|
|
// effectively hide all the symbols from other projects.
|
|
|
|
// There is a case where we have:
|
|
|
|
// - Project A links against SPIRV-Cross statically.
|
|
|
|
// - Project A links against Project B statically.
|
|
|
|
// - Project B links against SPIRV-Cross statically (might be a different version).
|
|
|
|
// This leads to a conflict with extremely bizarre results.
|
|
|
|
// By overriding the namespace in one of the project builds, we can work around this.
|
|
|
|
// If SPIRV-Cross is embedded in dynamic libraries,
|
|
|
|
// prefer using -fvisibility=hidden on GCC/Clang instead.
|
|
|
|
#ifdef SPIRV_CROSS_NAMESPACE_OVERRIDE
|
|
|
|
#define SPIRV_CROSS_NAMESPACE SPIRV_CROSS_NAMESPACE_OVERRIDE
|
|
|
|
#else
|
|
|
|
#define SPIRV_CROSS_NAMESPACE spirv_cross
|
|
|
|
#endif
|
|
|
|
|
|
|
|
namespace SPIRV_CROSS_NAMESPACE
|
2016-03-02 17:09:16 +00:00
|
|
|
{
|
2016-05-05 07:33:18 +00:00
|
|
|
// Implementation detail of join(): recursively streams each argument into a
// single StringStream to avoid temporary std::string concatenations.
namespace inner
{
// Base case: stream the final (or only) argument.
template <typename T>
void join_helper(StringStream<> &stream, T &&t)
{
	stream << std::forward<T>(t);
}

// Recursive case: stream the head argument, then recurse on the tail.
template <typename T, typename... Ts>
void join_helper(StringStream<> &stream, T &&t, Ts &&... ts)
{
	stream << std::forward<T>(t);
	join_helper(stream, std::forward<Ts>(ts)...);
}
} // namespace inner
|
2016-05-05 07:33:18 +00:00
|
|
|
|
2018-03-12 12:09:25 +00:00
|
|
|
class Bitset
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
Bitset() = default;
|
|
|
|
explicit inline Bitset(uint64_t lower_)
|
2018-03-13 13:05:33 +00:00
|
|
|
: lower(lower_)
|
2018-03-12 12:09:25 +00:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
inline bool get(uint32_t bit) const
|
|
|
|
{
|
|
|
|
if (bit < 64)
|
|
|
|
return (lower & (1ull << bit)) != 0;
|
|
|
|
else
|
|
|
|
return higher.count(bit) != 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
inline void set(uint32_t bit)
|
|
|
|
{
|
|
|
|
if (bit < 64)
|
|
|
|
lower |= 1ull << bit;
|
|
|
|
else
|
|
|
|
higher.insert(bit);
|
|
|
|
}
|
|
|
|
|
|
|
|
inline void clear(uint32_t bit)
|
|
|
|
{
|
|
|
|
if (bit < 64)
|
|
|
|
lower &= ~(1ull << bit);
|
|
|
|
else
|
|
|
|
higher.erase(bit);
|
|
|
|
}
|
|
|
|
|
|
|
|
inline uint64_t get_lower() const
|
|
|
|
{
|
|
|
|
return lower;
|
|
|
|
}
|
|
|
|
|
|
|
|
inline void reset()
|
|
|
|
{
|
|
|
|
lower = 0;
|
|
|
|
higher.clear();
|
|
|
|
}
|
|
|
|
|
|
|
|
inline void merge_and(const Bitset &other)
|
|
|
|
{
|
|
|
|
lower &= other.lower;
|
|
|
|
std::unordered_set<uint32_t> tmp_set;
|
|
|
|
for (auto &v : higher)
|
|
|
|
if (other.higher.count(v) != 0)
|
|
|
|
tmp_set.insert(v);
|
|
|
|
higher = std::move(tmp_set);
|
|
|
|
}
|
|
|
|
|
|
|
|
inline void merge_or(const Bitset &other)
|
|
|
|
{
|
|
|
|
lower |= other.lower;
|
|
|
|
for (auto &v : other.higher)
|
|
|
|
higher.insert(v);
|
|
|
|
}
|
|
|
|
|
|
|
|
inline bool operator==(const Bitset &other) const
|
|
|
|
{
|
|
|
|
if (lower != other.lower)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (higher.size() != other.higher.size())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
for (auto &v : higher)
|
|
|
|
if (other.higher.count(v) == 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
inline bool operator!=(const Bitset &other) const
|
|
|
|
{
|
|
|
|
return !(*this == other);
|
|
|
|
}
|
|
|
|
|
|
|
|
template <typename Op>
|
|
|
|
void for_each_bit(const Op &op) const
|
|
|
|
{
|
|
|
|
// TODO: Add ctz-based iteration.
|
|
|
|
for (uint32_t i = 0; i < 64; i++)
|
|
|
|
{
|
|
|
|
if (lower & (1ull << i))
|
|
|
|
op(i);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (higher.empty())
|
|
|
|
return;
|
|
|
|
|
|
|
|
// Need to enforce an order here for reproducible results,
|
|
|
|
// but hitting this path should happen extremely rarely, so having this slow path is fine.
|
2019-04-02 09:19:03 +00:00
|
|
|
SmallVector<uint32_t> bits;
|
2018-03-12 12:09:25 +00:00
|
|
|
bits.reserve(higher.size());
|
|
|
|
for (auto &v : higher)
|
|
|
|
bits.push_back(v);
|
|
|
|
std::sort(std::begin(bits), std::end(bits));
|
|
|
|
|
|
|
|
for (auto &v : bits)
|
|
|
|
op(v);
|
|
|
|
}
|
|
|
|
|
|
|
|
inline bool empty() const
|
|
|
|
{
|
|
|
|
return lower == 0 && higher.empty();
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
// The most common bits to set are all lower than 64,
|
|
|
|
// so optimize for this case. Bits spilling outside 64 go into a slower data structure.
|
|
|
|
// In almost all cases, higher data structure will not be used.
|
|
|
|
uint64_t lower = 0;
|
|
|
|
std::unordered_set<uint32_t> higher;
|
|
|
|
};
|
|
|
|
|
2016-05-05 07:33:18 +00:00
|
|
|
// Helper template to avoid lots of nasty string temporary munging.
// Streams every argument into one StringStream and returns the concatenation.
template <typename... Ts>
std::string join(Ts &&... ts)
{
	StringStream<> stream;
	inner::join_helper(stream, std::forward<Ts>(ts)...);
	return stream.str();
}
|
|
|
|
|
2019-06-21 09:16:51 +00:00
|
|
|
inline std::string merge(const SmallVector<std::string> &list, const char *between = ", ")
|
2016-05-05 07:33:18 +00:00
|
|
|
{
|
2019-04-02 09:19:03 +00:00
|
|
|
StringStream<> stream;
|
2016-05-05 07:33:18 +00:00
|
|
|
for (auto &elem : list)
|
|
|
|
{
|
2019-04-02 09:19:03 +00:00
|
|
|
stream << elem;
|
2016-05-05 07:33:18 +00:00
|
|
|
if (&elem != &list.back())
|
2019-06-21 09:16:51 +00:00
|
|
|
stream << between;
|
2016-05-05 07:33:18 +00:00
|
|
|
}
|
2019-04-02 09:19:03 +00:00
|
|
|
return stream.str();
|
2016-05-05 07:33:18 +00:00
|
|
|
}
|
|
|
|
|
2019-02-28 10:26:26 +00:00
|
|
|
// Generic integer-to-string conversion.
// Make sure we don't accidentally call this with float or doubles with SFINAE.
// Have to use the radix-aware overload.
template <typename T, typename std::enable_if<!std::is_floating_point<T>::value, int>::type = 0>
inline std::string convert_to_string(const T &t)
{
	return std::to_string(t);
}
|
|
|
|
|
2021-09-30 14:17:04 +00:00
|
|
|
// Stringifies a 32-bit signed integer for shader output.
static inline std::string convert_to_string(int32_t value)
{
	// INT_MIN is ... special on some backends. If we use a decimal literal, and negate it, we
	// could accidentally promote the literal to long first, then negate.
	// To workaround it, emit int(0x80000000) instead.
	// (min is parenthesized to dodge a potential min() macro from <windows.h>.)
	if (value != (std::numeric_limits<int32_t>::min)())
		return std::to_string(value);
	return "int(0x80000000)";
}
|
|
|
|
|
|
|
|
static inline std::string convert_to_string(int64_t value, const std::string &int64_type, bool long_long_literal_suffix)
|
|
|
|
{
|
|
|
|
// INT64_MIN is ... special on some backends.
|
|
|
|
// If we use a decimal literal, and negate it, we might overflow the representable numbers.
|
|
|
|
// To workaround it, emit int(0x80000000) instead.
|
2024-01-22 08:49:18 +00:00
|
|
|
if (value == (std::numeric_limits<int64_t>::min)())
|
2021-09-30 14:17:04 +00:00
|
|
|
return join(int64_type, "(0x8000000000000000u", (long_long_literal_suffix ? "ll" : "l"), ")");
|
|
|
|
else
|
|
|
|
return std::to_string(value) + (long_long_literal_suffix ? "ll" : "l");
|
|
|
|
}
|
|
|
|
|
2016-05-05 07:33:18 +00:00
|
|
|
// Allow implementations to set a convenient standard precision
|
2016-04-08 19:12:40 +00:00
|
|
|
#ifndef SPIRV_CROSS_FLT_FMT
|
2016-05-05 07:33:18 +00:00
|
|
|
#define SPIRV_CROSS_FLT_FMT "%.32g"
|
2016-04-06 21:42:27 +00:00
|
|
|
#endif
|
|
|
|
|
2020-04-09 15:30:20 +00:00
|
|
|
// Disable sprintf and strcat warnings.
|
|
|
|
// We cannot rely on snprintf and family existing because, ..., MSVC.
|
|
|
|
#if defined(__clang__) || defined(__GNUC__)
|
|
|
|
#pragma GCC diagnostic push
|
|
|
|
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
|
|
|
|
#elif defined(_MSC_VER)
|
2017-01-12 09:57:44 +00:00
|
|
|
#pragma warning(push)
|
2017-01-13 15:32:54 +00:00
|
|
|
#pragma warning(disable : 4996)
|
2017-01-12 09:57:44 +00:00
|
|
|
#endif
|
|
|
|
|
2019-02-28 10:26:26 +00:00
|
|
|
// Rewrites the locale-specific radix point in a printf-formatted number to '.'.
// Setting locales is a very risky business in multi-threaded program,
// so just fixup locales instead. We only need to care about the radix point.
static inline void fixup_radix_point(char *str, char radix_point)
{
	// The common case ('.' locale) needs no work at all.
	if (radix_point == '.')
		return;

	for (char *p = str; *p != '\0'; p++)
	{
		if (*p == radix_point)
			*p = '.';
	}
}
|
|
|
|
|
|
|
|
// Formats a float via SPIRV_CROSS_FLT_FMT, normalizes the locale radix point,
// and guarantees the result reads back as a floating-point literal.
inline std::string convert_to_string(float t, char locale_radix_point)
{
	// std::to_string for floating point values is broken.
	// Fallback to something more sane.
	char buf[64];
	sprintf(buf, SPIRV_CROSS_FLT_FMT, t);
	fixup_radix_point(buf, locale_radix_point);

	// Ensure that the literal is float: without '.' or an exponent it would
	// parse as an integer literal in the output language.
	if (!strchr(buf, '.') && !strchr(buf, 'e'))
		strcat(buf, ".0");
	return buf;
}
|
|
|
|
|
2019-02-28 10:26:26 +00:00
|
|
|
// Formats a double via SPIRV_CROSS_FLT_FMT, normalizes the locale radix point,
// and guarantees the result reads back as a floating-point literal.
inline std::string convert_to_string(double t, char locale_radix_point)
{
	// std::to_string for floating point values is broken.
	// Fallback to something more sane.
	char buf[64];
	sprintf(buf, SPIRV_CROSS_FLT_FMT, t);
	fixup_radix_point(buf, locale_radix_point);

	// Ensure that the literal is a floating-point literal: without '.' or an
	// exponent it would parse as an integer literal in the output language.
	if (!strchr(buf, '.') && !strchr(buf, 'e'))
		strcat(buf, ".0");
	return buf;
}
|
|
|
|
|
2023-11-22 12:06:05 +00:00
|
|
|
#if defined(__clang__) || defined(__GNUC__)
|
|
|
|
#pragma GCC diagnostic pop
|
|
|
|
#elif defined(_MSC_VER)
|
|
|
|
#pragma warning(pop)
|
|
|
|
#endif
|
|
|
|
|
|
|
|
// Interface allowing users to override how floating-point literals are
// formatted in generated output.
class FloatFormatter
{
public:
	virtual ~FloatFormatter() = default;
	// Returns the textual representation to emit for a float value.
	virtual std::string format_float(float value) = 0;
	// Returns the textual representation to emit for a double value.
	virtual std::string format_double(double value) = 0;
};
|
|
|
|
|
2020-06-29 10:20:35 +00:00
|
|
|
// RAII guard which snapshots a value on construction and restores it when the
// guard is destroyed, or earlier via release().
template <typename T>
struct ValueSaver
{
	explicit ValueSaver(T &current_)
	    : current(current_)
	    , saved(current_)
	{
	}

	// Restore the saved value immediately. Safe to call more than once.
	void release()
	{
		current = saved;
	}

	~ValueSaver()
	{
		// Restoring is idempotent, so an earlier explicit release() is harmless.
		current = saved;
	}

	T &current; // Reference to the live value being guarded.
	T saved;    // Snapshot taken at construction.
};
|
|
|
|
|
2016-05-05 07:33:18 +00:00
|
|
|
// Compact record describing a single SPIR-V instruction in the module stream.
struct Instruction
{
	uint16_t op = 0;    // SPIR-V opcode.
	uint16_t count = 0; // Word count of the instruction.
	// If offset is 0 (not a valid offset into the instruction stream),
	// we have an instruction stream which is embedded in the object.
	uint32_t offset = 0; // Offset of the operands in the instruction stream.
	uint32_t length = 0; // Number of operand words.

	// True if the operands live in the object itself (EmbeddedInstruction)
	// rather than at an offset into the module's instruction stream.
	inline bool is_embedded() const
	{
		return offset == 0;
	}
};
|
|
|
|
|
|
|
|
// An instruction whose operand words are stored in the object itself
// (offset == 0, see Instruction::is_embedded()).
struct EmbeddedInstruction : Instruction
{
	SmallVector<uint32_t> ops; // The embedded operand words.
};
|
|
|
|
|
|
|
|
// Discriminators for the Variant type system. Each IVariant subclass exposes
// its tag through a `type` enum member (e.g. SPIRUndef::type == TypeUndef).
enum Types
{
	TypeNone,
	TypeType,
	TypeVariable,
	TypeConstant,
	TypeFunction,
	TypeFunctionPrototype,
	TypeBlock,
	TypeExtension,
	TypeExpression,
	TypeConstantOp,
	TypeCombinedImageSampler,
	TypeAccessChain,
	TypeUndef,
	TypeString,
	TypeCount // Number of tags; keep last.
};
|
|
|
|
|
2019-09-05 10:43:40 +00:00
|
|
|
// Strongly typed wrapper over a raw 32-bit SPIR-V ID. The Types tag prevents
// accidentally mixing IDs that refer to different kinds of objects.
template <Types type>
class TypedID;

// TypedID<TypeNone> is the "untyped" ID: it converts implicitly to and from
// any specific TypedID<U>, acting as the common currency type.
template <>
class TypedID<TypeNone>
{
public:
	TypedID() = default;
	TypedID(uint32_t id_)
	    : id(id_)
	{
	}

	// Any typed ID may freely decay to an untyped one.
	template <Types U>
	TypedID(const TypedID<U> &other)
	{
		*this = other;
	}

	template <Types U>
	TypedID &operator=(const TypedID<U> &other)
	{
		id = uint32_t(other);
		return *this;
	}

	// Implicit conversion to u32 is desired here.
	// As long as we block implicit conversion between TypedID<A> and TypedID<B> we're good.
	operator uint32_t() const
	{
		return id;
	}

	// An untyped ID may convert to any typed ID.
	template <Types U>
	operator TypedID<U>() const
	{
		return TypedID<U>(*this);
	}

private:
	uint32_t id = 0;
};
|
|
|
|
|
|
|
|
// The typed variant: implicitly constructible from a raw u32, but conversion
// from the untyped ID must be explicit, and there is no conversion at all
// between two differently-typed IDs.
template <Types type>
class TypedID
{
public:
	TypedID() = default;
	TypedID(uint32_t id_)
	    : id(id_)
	{
	}

	explicit TypedID(const TypedID<TypeNone> &other)
	    : id(uint32_t(other))
	{
	}

	operator uint32_t() const
	{
		return id;
	}

private:
	uint32_t id = 0;
};
|
|
|
|
|
|
|
|
// Convenience aliases for the ID kinds used throughout the compiler.
using VariableID = TypedID<TypeVariable>;
using TypeID = TypedID<TypeType>;
using ConstantID = TypedID<TypeConstant>;
using FunctionID = TypedID<TypeFunction>;
using BlockID = TypedID<TypeBlock>;
using ID = TypedID<TypeNone>;
|
|
|
|
|
|
|
|
// Helper for Variant interface.
// Common base for every object stored in a variant slot; clone() deep-copies
// the object into the ObjectPool that owns its concrete type.
struct IVariant
{
	virtual ~IVariant() = default;
	virtual IVariant *clone(ObjectPoolBase *pool) = 0;
	ID self = 0; // The object's own ID.

protected:
	// Copy operations are restricted to derived classes so that clone() is
	// the only public way to duplicate a variant.
	IVariant() = default;
	IVariant(const IVariant&) = default;
	IVariant &operator=(const IVariant&) = default;
};
|
|
|
|
|
|
|
|
// Implements IVariant::clone() for type T by copy-constructing *this into the
// ObjectPool<T> passed down by the caller.
#define SPIRV_CROSS_DECLARE_CLONE(T)                                \
	IVariant *clone(ObjectPoolBase *pool) override                  \
	{                                                               \
		return static_cast<ObjectPool<T> *>(pool)->allocate(*this); \
	}
|
|
|
|
|
2016-05-05 07:33:18 +00:00
|
|
|
// An undefined value of a given type (e.g. the result of OpUndef).
struct SPIRUndef : IVariant
{
	enum
	{
		type = TypeUndef
	};

	explicit SPIRUndef(TypeID basetype_)
	    : basetype(basetype_)
	{
	}

	TypeID basetype; // Type of the undefined value.

	SPIRV_CROSS_DECLARE_CLONE(SPIRUndef)
};
|
|
|
|
|
2019-05-28 11:41:46 +00:00
|
|
|
// A string object in the IR (e.g. from OpString).
struct SPIRString : IVariant
{
	enum
	{
		type = TypeString
	};

	explicit SPIRString(std::string str_)
	    : str(std::move(str_))
	{
	}

	std::string str; // The literal string payload.

	SPIRV_CROSS_DECLARE_CLONE(SPIRString)
};
|
|
|
|
|
2017-04-25 08:44:55 +00:00
|
|
|
// This type is only used by backends which need to access the combined image and sampler IDs separately after
// the OpSampledImage opcode.
struct SPIRCombinedImageSampler : IVariant
{
	enum
	{
		type = TypeCombinedImageSampler
	};

	SPIRCombinedImageSampler(TypeID type_, VariableID image_, VariableID sampler_)
	    : combined_type(type_)
	    , image(image_)
	    , sampler(sampler_)
	{
	}

	TypeID combined_type;  // Type of the combined image-sampler.
	VariableID image;      // The underlying image variable.
	VariableID sampler;    // The underlying sampler variable.

	SPIRV_CROSS_DECLARE_CLONE(SPIRCombinedImageSampler)
};
|
|
|
|
|
2016-10-03 13:54:02 +00:00
|
|
|
// An operation over constant operands which itself yields a constant
// (e.g. OpSpecConstantOp).
struct SPIRConstantOp : IVariant
{
	enum
	{
		type = TypeConstantOp
	};

	// args points to `length` operand words which are copied into `arguments`.
	SPIRConstantOp(TypeID result_type, spv::Op op, const uint32_t *args, uint32_t length)
	    : opcode(op)
	    , basetype(result_type)
	{
		arguments.reserve(length);
		for (uint32_t i = 0; i < length; i++)
			arguments.push_back(args[i]);
	}

	spv::Op opcode;                  // The operation to evaluate.
	SmallVector<uint32_t> arguments; // Operand words for the operation.
	TypeID basetype;                 // Result type of the operation.

	SPIRV_CROSS_DECLARE_CLONE(SPIRConstantOp)
};
|
|
|
|
|
2016-05-05 07:33:18 +00:00
|
|
|
// Describes a SPIR-V type: scalars, vectors, matrices, arrays, pointers,
// images, samplers and structs are all represented by this one record.
struct SPIRType : IVariant
{
	enum
	{
		type = TypeType
	};

	// The opcode which declared this type (e.g. OpTypeInt, OpTypeStruct).
	spv::Op op = spv::Op::OpNop;
	explicit SPIRType(spv::Op op_) : op(op_) {}

	enum BaseType
	{
		Unknown,
		Void,
		Boolean,
		SByte,
		UByte,
		Short,
		UShort,
		Int,
		UInt,
		Int64,
		UInt64,
		AtomicCounter,
		Half,
		Float,
		Double,
		Struct,
		Image,
		SampledImage,
		Sampler,
		AccelerationStructure,
		RayQuery,

		// Keep internal types at the end.
		ControlPointArray,
		Interpolant,
		Char
	};

	// Scalar/vector/matrix support.
	BaseType basetype = Unknown;
	uint32_t width = 0;   // Bit width of a scalar component.
	uint32_t vecsize = 1; // Number of vector components (1 for scalars).
	uint32_t columns = 1; // Number of matrix columns (1 for non-matrices).

	// Arrays, support array of arrays by having a vector of array sizes.
	SmallVector<uint32_t> array;

	// Array elements can be either specialization constants or specialization ops.
	// This array determines how to interpret the array size.
	// If an element is true, the element is a literal,
	// otherwise, it's an expression, which must be resolved on demand.
	// The actual size is not really known until runtime.
	SmallVector<bool> array_size_literal;

	// Pointers
	// Keep track of how many pointer layers we have.
	uint32_t pointer_depth = 0;
	bool pointer = false;
	bool forward_pointer = false;

	spv::StorageClass storage = spv::StorageClassGeneric;

	// For structs: types of each member, in declaration order.
	SmallVector<TypeID> member_types;

	// If member order has been rewritten to handle certain scenarios with Offset,
	// allow codegen to rewrite the index.
	SmallVector<uint32_t> member_type_index_redirection;

	// Properties for image and sampled-image types.
	struct ImageType
	{
		TypeID type;
		spv::Dim dim;
		bool depth;
		bool arrayed;
		bool ms;
		uint32_t sampled;
		spv::ImageFormat format;
		spv::AccessQualifier access;
	} image = {};

	// Structs can be declared multiple times if they are used as part of interface blocks.
	// We want to detect this so that we only emit the struct definition once.
	// Since we cannot rely on OpName to be equal, we need to figure out aliases.
	TypeID type_alias = 0;

	// Denotes the type which this type is based on.
	// Allows the backend to traverse how a complex type is built up during access chains.
	TypeID parent_type = 0;

	// Used in backends to avoid emitting members with conflicting names.
	std::unordered_set<std::string> member_name_cache;

	SPIRV_CROSS_DECLARE_CLONE(SPIRType)
};
|
|
|
|
|
|
|
|
// Tracks which extended instruction set an extension ID refers to
// (e.g. GLSL.std.450 or one of the vendor/non-semantic sets below).
struct SPIRExtension : IVariant
{
	enum
	{
		type = TypeExtension
	};

	enum Extension
	{
		Unsupported,
		GLSL,
		SPV_debug_info,
		SPV_AMD_shader_ballot,
		SPV_AMD_shader_explicit_vertex_parameter,
		SPV_AMD_shader_trinary_minmax,
		SPV_AMD_gcn_shader,
		NonSemanticDebugPrintf,
		NonSemanticShaderDebugInfo,
		NonSemanticGeneric
	};

	explicit SPIRExtension(Extension ext_)
	    : ext(ext_)
	{
	}

	Extension ext; // Which instruction set this extension object represents.
	SPIRV_CROSS_DECLARE_CLONE(SPIRExtension)
};
|
|
|
|
|
2016-07-28 09:16:02 +00:00
|
|
|
// SPIREntryPoint is not a variant since its IDs are used to decorate OpFunction,
// so in order to avoid conflicts, we can't stick them in the ids array.
struct SPIREntryPoint
{
	SPIREntryPoint(FunctionID self_, spv::ExecutionModel execution_model, const std::string &entry_name)
	    : self(self_)
	    , name(entry_name)
	    , orig_name(entry_name)
	    , model(execution_model)
	{
	}
	SPIREntryPoint() = default;

	FunctionID self = 0;   // The function serving as this entry point.
	std::string name;      // Current (possibly renamed) entry point name.
	std::string orig_name; // Name as it appeared in the original module.
	SmallVector<VariableID> interface_variables;

	Bitset flags;
	struct WorkgroupSize
	{
		uint32_t x = 0, y = 0, z = 0;
		// Per-dimension sizes may also be supplied via IDs (e.g. spec constants).
		uint32_t id_x = 0, id_y = 0, id_z = 0;
		uint32_t constant = 0; // Workgroup size can be expressed as a constant/spec-constant instead.
	} workgroup_size;
	uint32_t invocations = 0;
	uint32_t output_vertices = 0;
	uint32_t output_primitives = 0;
	spv::ExecutionModel model = spv::ExecutionModelMax;
	bool geometry_passthrough = false;
};
|
|
|
|
|
2016-05-05 07:33:18 +00:00
|
|
|
// A textual expression synthesized by a backend, together with its type and
// the bookkeeping needed to decide when it must be hoisted into a temporary.
struct SPIRExpression : IVariant
{
	enum
	{
		type = TypeExpression
	};

	// Only created by the backend target to avoid creating tons of temporaries.
	SPIRExpression(std::string expr, TypeID expression_type_, bool immutable_)
	    : expression(std::move(expr))
	    , expression_type(expression_type_)
	    , immutable(immutable_)
	{
	}

	// If non-zero, prepend expression with to_expression(base_expression).
	// Used in amortizing multiple calls to to_expression()
	// where in certain cases that would quickly force a temporary when not needed.
	ID base_expression = 0;

	std::string expression;        // The expression text itself.
	TypeID expression_type = 0;    // Type of the value the expression yields.

	// If this expression is a forwarded load,
	// allow us to reference the original variable.
	ID loaded_from = 0;

	// If this expression will never change, we can avoid lots of temporaries
	// in high level source.
	// An expression being immutable can be speculative,
	// it is assumed that this is true almost always.
	bool immutable = false;

	// Before use, this expression must be transposed.
	// This is needed for targets which don't support row_major layouts.
	bool need_transpose = false;

	// Whether or not this is an access chain expression.
	bool access_chain = false;

	// Whether or not gl_MeshVerticesEXT[].gl_Position (as a whole or .y) is referenced
	bool access_meshlet_position_y = false;

	// A list of expressions which this expression depends on.
	SmallVector<ID> expression_dependencies;

	// By reading this expression, we implicitly read these expressions as well.
	// Used by access chain Store and Load since we read multiple expressions in this case.
	SmallVector<ID> implied_read_expressions;

	// The expression was emitted at a certain scope. Lets us track when an expression read means multiple reads.
	uint32_t emitted_loop_level = 0;

	SPIRV_CROSS_DECLARE_CLONE(SPIRExpression)
};
|
|
|
|
|
|
|
|
// Represents an OpTypeFunction: the return type plus ordered parameter types.
// This is a type-level entity, not a function definition (see SPIRFunction for that).
struct SPIRFunctionPrototype : IVariant
{
	enum
	{
		type = TypeFunctionPrototype
	};

	explicit SPIRFunctionPrototype(TypeID return_type_)
	    : return_type(return_type_)
	{
	}

	// Type ID of the value returned by functions of this prototype.
	TypeID return_type;
	// Type IDs of each parameter, in declaration order.
	SmallVector<uint32_t> parameter_types;

	SPIRV_CROSS_DECLARE_CLONE(SPIRFunctionPrototype)
};
|
|
|
|
|
|
|
|
// A basic block in the SPIR-V control flow graph, together with all the
// bookkeeping the compiler needs to reconstruct structured (high-level)
// control flow: terminator kind, merge information, PHI handling, switch
// cases, loop/dominator relationships and temporary declarations.
struct SPIRBlock : IVariant
{
	enum
	{
		type = TypeBlock
	};

	// How this block ends.
	enum Terminator
	{
		Unknown,
		Direct, // Emit next block directly without a particular condition.

		Select, // Block ends with an if/else block.
		MultiSelect, // Block ends with switch statement.

		Return, // Block ends with return.
		Unreachable, // Noop
		Kill, // Discard
		IgnoreIntersection, // Ray Tracing
		TerminateRay, // Ray Tracing
		EmitMeshTasks // Mesh shaders
	};

	// Kind of merge construct (from OpLoopMerge / OpSelectionMerge) this block heads.
	enum Merge
	{
		MergeNone,
		MergeLoop,
		MergeSelection
	};

	// Loop/selection control hints (e.g. from SPIR-V loop control masks).
	enum Hints
	{
		HintNone,
		HintUnroll,
		HintDontUnroll,
		HintFlatten,
		HintDontFlatten
	};

	// Strategies for how a loop header can be emitted in high-level code.
	enum Method
	{
		MergeToSelectForLoop,
		MergeToDirectForLoop,
		MergeToSelectContinueForLoop
	};

	// Classification of the loop's continue block, which decides which
	// high-level loop construct (for/while/do-while) we can emit.
	enum ContinueBlockType
	{
		ContinueNone,

		// Continue block is branchless and has at least one instruction.
		ForLoop,

		// Noop continue block.
		WhileLoop,

		// Continue block is conditional.
		DoWhileLoop,

		// Highly unlikely that anything will use this,
		// since it is really awkward/impossible to express in GLSL.
		ComplexLoop
	};

	enum : uint32_t
	{
		// Sentinel for "no dominating block".
		NoDominator = 0xffffffffu
	};

	Terminator terminator = Unknown;
	Merge merge = MergeNone;
	Hints hint = HintNone;
	BlockID next_block = 0;
	BlockID merge_block = 0;
	BlockID continue_block = 0;

	ID return_value = 0; // If 0, return nothing (void).
	ID condition = 0;
	BlockID true_block = 0;
	BlockID false_block = 0;
	BlockID default_block = 0;

	// If terminator is EmitMeshTasksEXT.
	struct
	{
		ID groups[3];
		ID payload;
	} mesh = {};

	// The raw SPIR-V instructions belonging to this block.
	SmallVector<Instruction> ops;

	// Describes one OpPhi relationship for this block.
	struct Phi
	{
		ID local_variable; // flush local variable ...
		BlockID parent; // If we're in from_block and want to branch into this block ...
		VariableID function_variable; // to this function-global "phi" variable first.
	};

	// Before entering this block flush out local variables to magical "phi" variables.
	SmallVector<Phi> phi_variables;

	// Declare these temporaries before beginning the block.
	// Used for handling complex continue blocks which have side effects.
	SmallVector<std::pair<TypeID, ID>> declare_temporary;

	// Declare these temporaries, but only conditionally if this block turns out to be
	// a complex loop header.
	SmallVector<std::pair<TypeID, ID>> potential_declare_temporary;

	// One case label of an OpSwitch: literal value and its target block.
	struct Case
	{
		uint64_t value;
		BlockID block;
	};
	// Switch cases, split by selector width (64-bit selectors need distinct handling).
	SmallVector<Case> cases_32bit;
	SmallVector<Case> cases_64bit;

	// If we have tried to optimize code for this block but failed,
	// keep track of this.
	bool disable_block_optimization = false;

	// If the continue block is complex, fallback to "dumb" for loops.
	bool complex_continue = false;

	// Do we need a ladder variable to defer breaking out of a loop construct after a switch block?
	bool need_ladder_break = false;

	// If marked, we have explicitly handled Phi from this block, so skip any flushes related to that on a branch.
	// Used to handle an edge case with switch and case-label fallthrough where fall-through writes to Phi.
	BlockID ignore_phi_from_block = 0;

	// The dominating block which this block might be within.
	// Used in continue; blocks to determine if we really need to write continue.
	BlockID loop_dominator = 0;

	// All access to these variables are dominated by this block,
	// so before branching anywhere we need to make sure that we declare these variables.
	SmallVector<VariableID> dominated_variables;

	// These are variables which should be declared in a for loop header, if we
	// fail to use a classic for-loop,
	// we remove these variables, and fall back to regular variables outside the loop.
	SmallVector<VariableID> loop_variables;

	// Some expressions are control-flow dependent, i.e. any instruction which relies on derivatives or
	// sub-group-like operations.
	// Make sure that we only use these expressions in the original block.
	SmallVector<ID> invalidate_expressions;

	SPIRV_CROSS_DECLARE_CLONE(SPIRBlock)
};
|
|
|
|
|
|
|
|
// A function definition (OpFunction): its signature, parameters, the basic
// blocks it owns, plus backend bookkeeping such as combined image/sampler
// remapping and fixup hooks run at function entry/exit.
struct SPIRFunction : IVariant
{
	enum
	{
		type = TypeFunction
	};

	SPIRFunction(TypeID return_type_, TypeID function_type_)
	    : return_type(return_type_)
	    , function_type(function_type_)
	{
	}

	// One formal parameter of the function.
	struct Parameter
	{
		TypeID type;
		ID id;
		// Read/write counts are used to infer in/out/inout qualifiers for the parameter.
		uint32_t read_count;
		uint32_t write_count;

		// Set to true if this parameter aliases a global variable,
		// used mostly in Metal where global variables
		// have to be passed down to functions as regular arguments.
		// However, for this kind of variable, we should not care about
		// read and write counts as access to the function arguments
		// is not local to the function in question.
		bool alias_global_variable;
	};

	// When calling a function, and we're remapping separate image samplers,
	// resolve these arguments into combined image samplers and pass them
	// as additional arguments in this order.
	// It gets more complicated as functions can pull in their own globals
	// and combine them with parameters,
	// so we need to distinguish if something is local parameter index
	// or a global ID.
	struct CombinedImageSamplerParameter
	{
		VariableID id;
		VariableID image_id;
		VariableID sampler_id;
		bool global_image;
		bool global_sampler;
		bool depth;
	};

	TypeID return_type;
	TypeID function_type;
	SmallVector<Parameter> arguments;

	// Can be used by backends to add magic arguments.
	// Currently used by combined image/sampler implementation.

	SmallVector<Parameter> shadow_arguments;
	SmallVector<VariableID> local_variables;
	BlockID entry_block = 0;
	SmallVector<BlockID> blocks;
	SmallVector<CombinedImageSamplerParameter> combined_parameters;

	// Source line information from OpLine for the function entry point.
	struct EntryLine
	{
		uint32_t file_id = 0;
		uint32_t line_literal = 0;
	};
	EntryLine entry_line;

	// Registers a function-local variable.
	void add_local_variable(VariableID id)
	{
		local_variables.push_back(id);
	}

	// Appends a formal parameter. Read/write counts start at zero and are
	// bumped as usage is discovered during analysis.
	void add_parameter(TypeID parameter_type, ID id, bool alias_global_variable = false)
	{
		// Arguments are read-only until proven otherwise.
		arguments.push_back({ parameter_type, id, 0u, 0u, alias_global_variable });
	}

	// Hooks to be run when the function returns.
	// Mostly used for lowering internal data structures onto flattened structures.
	// Need to defer this, because they might rely on things which change during compilation.
	// Intentionally not a small vector, this one is rare, and std::function can be large.
	Vector<std::function<void()>> fixup_hooks_out;

	// Hooks to be run when the function begins.
	// Mostly used for populating internal data structures from flattened structures.
	// Need to defer this, because they might rely on things which change during compilation.
	// Intentionally not a small vector, this one is rare, and std::function can be large.
	Vector<std::function<void()>> fixup_hooks_in;

	// On function entry, make sure to copy a constant array into thread addr space to work around
	// the case where we are passing a constant array by value to a function on backends which do not
	// consider arrays value types.
	SmallVector<ID> constant_arrays_needed_on_stack;

	// True if the function is actually reachable/used in the shader being compiled.
	bool active = false;
	bool flush_undeclared = true;
	bool do_combined_parameters = true;

	SPIRV_CROSS_DECLARE_CLONE(SPIRFunction)
};
|
|
|
|
|
2017-08-10 13:36:30 +00:00
|
|
|
// A raw, offset-based access chain into a buffer.
struct SPIRAccessChain : IVariant
{
	enum
	{
		type = TypeAccessChain
	};

	SPIRAccessChain(TypeID basetype_, spv::StorageClass storage_, std::string base_, std::string dynamic_index_,
	                int32_t static_index_)
	    : basetype(basetype_)
	    , storage(storage_)
	    , base(std::move(base_))
	    , dynamic_index(std::move(dynamic_index_))
	    , static_index(static_index_)
	{
	}

	// The access chain represents an offset into a buffer.
	// Some backends need more complicated handling of access chains to be able to use buffers, like HLSL
	// which has no usable buffer type ala GLSL SSBOs.
	// StructuredBuffer is too limited, so our only option is to deal with ByteAddressBuffer which works with raw addresses.

	TypeID basetype;
	spv::StorageClass storage;
	// Expression string for the buffer object being indexed.
	std::string base;
	// Expression string for the runtime-computed part of the byte offset.
	std::string dynamic_index;
	// Compile-time constant part of the byte offset.
	int32_t static_index;

	VariableID loaded_from = 0;
	uint32_t matrix_stride = 0;
	uint32_t array_stride = 0;
	bool row_major_matrix = false;
	bool immutable = false;

	// By reading this expression, we implicitly read these expressions as well.
	// Used by access chain Store and Load since we read multiple expressions in this case.
	SmallVector<ID> implied_read_expressions;

	SPIRV_CROSS_DECLARE_CLONE(SPIRAccessChain)
};
|
|
|
|
|
2016-05-05 07:33:18 +00:00
|
|
|
// A variable declaration (OpVariable): its type, storage class, optional
// initializer, and analysis state (dominator, loop-variable status, phi
// handling, remapping) used while emitting high-level code.
struct SPIRVariable : IVariant
{
	enum
	{
		type = TypeVariable
	};

	SPIRVariable() = default;
	SPIRVariable(TypeID basetype_, spv::StorageClass storage_, ID initializer_ = 0, VariableID basevariable_ = 0)
	    : basetype(basetype_)
	    , storage(storage_)
	    , initializer(initializer_)
	    , basevariable(basevariable_)
	{
	}

	TypeID basetype = 0;
	spv::StorageClass storage = spv::StorageClassGeneric;
	uint32_t decoration = 0;
	ID initializer = 0;
	// If non-zero, this variable is derived from another variable (e.g. a combined image/sampler).
	VariableID basevariable = 0;

	SmallVector<uint32_t> dereference_chain;
	bool compat_builtin = false;

	// If a variable is shadowed, we only statically assign to it
	// and never actually emit a statement for it.
	// When we read the variable as an expression, just forward
	// shadowed_id as the expression.
	bool statically_assigned = false;
	ID static_expression = 0;

	// Temporaries which can remain forwarded as long as this variable is not modified.
	SmallVector<ID> dependees;

	bool deferred_declaration = false;
	bool phi_variable = false;

	// Used to deal with Phi variable flushes. See flush_phi().
	bool allocate_temporary_copy = false;

	bool remapped_variable = false;
	uint32_t remapped_components = 0;

	// The block which dominates all access to this variable.
	BlockID dominator = 0;
	// If true, this variable is a loop variable, when accessing the variable
	// outside a loop,
	// we should statically forward it.
	bool loop_variable = false;
	// Set to true while we're inside the for loop.
	bool loop_variable_enable = false;

	// Non-owning back-pointer to the Parameter this variable represents, if any.
	SPIRFunction::Parameter *parameter = nullptr;

	SPIRV_CROSS_DECLARE_CLONE(SPIRVariable)
};
|
|
|
|
|
|
|
|
// A constant value (OpConstant*, OpSpecConstant*): scalars, vectors, matrices
// and composites. Scalar payloads are stored as a 4x4 matrix of 64-bit-capable
// unions; composites reference their element constants via subconstants.
struct SPIRConstant : IVariant
{
	enum
	{
		type = TypeConstant
	};

	// Raw scalar payload. All lanes share storage; which member is valid
	// depends on the constant's type.
	union Constant
	{
		uint32_t u32;
		int32_t i32;
		float f32;

		uint64_t u64;
		int64_t i64;
		double f64;
	};

	// Up to a 4-component vector of scalar constants.
	struct ConstantVector
	{
		Constant r[4];
		// If != 0, this element is a specialization constant, and we should keep track of it as such.
		ID id[4];
		uint32_t vecsize = 1;

		ConstantVector()
		{
			memset(r, 0, sizeof(r));
		}
	};

	// Up to a 4x4 matrix of constants (columns of ConstantVector).
	struct ConstantMatrix
	{
		ConstantVector c[4];
		// If != 0, this column is a specialization constant, and we should keep track of it as such.
		ID id[4];
		uint32_t columns = 1;
	};

	// Converts an IEEE 754 binary16 bit pattern to float,
	// handling zero, denormals, infinity and NaN explicitly.
	static inline float f16_to_f32(uint16_t u16_value)
	{
		// Based on the GLM implementation.
		int s = (u16_value >> 15) & 0x1;
		int e = (u16_value >> 10) & 0x1f;
		int m = (u16_value >> 0) & 0x3ff;

		union
		{
			float f32;
			uint32_t u32;
		} u;

		if (e == 0)
		{
			if (m == 0)
			{
				// Signed zero.
				u.u32 = uint32_t(s) << 31;
				return u.f32;
			}
			else
			{
				// Denormal: renormalize the mantissa, adjusting the exponent.
				while ((m & 0x400) == 0)
				{
					m <<= 1;
					e--;
				}

				e++;
				m &= ~0x400;
			}
		}
		else if (e == 31)
		{
			if (m == 0)
			{
				// Infinity.
				u.u32 = (uint32_t(s) << 31) | 0x7f800000u;
				return u.f32;
			}
			else
			{
				// NaN (mantissa bits preserved, shifted into binary32 position).
				u.u32 = (uint32_t(s) << 31) | 0x7f800000u | (m << 13);
				return u.f32;
			}
		}

		// Normal number: rebias exponent (15 -> 127) and widen mantissa (10 -> 23 bits).
		e += 127 - 15;
		m <<= 13;
		u.u32 = (uint32_t(s) << 31) | (e << 23) | m;
		return u.f32;
	}

	// Returns the spec constant ID backing element (col, row), or 0 if none.
	inline uint32_t specialization_constant_id(uint32_t col, uint32_t row) const
	{
		return m.c[col].id[row];
	}

	// Returns the spec constant ID backing column col, or 0 if none.
	inline uint32_t specialization_constant_id(uint32_t col) const
	{
		return m.id[col];
	}

	// Raw 32-bit scalar accessor; typed variants below reinterpret the same storage.
	inline uint32_t scalar(uint32_t col = 0, uint32_t row = 0) const
	{
		return m.c[col].r[row].u32;
	}

	inline int16_t scalar_i16(uint32_t col = 0, uint32_t row = 0) const
	{
		return int16_t(m.c[col].r[row].u32 & 0xffffu);
	}

	inline uint16_t scalar_u16(uint32_t col = 0, uint32_t row = 0) const
	{
		return uint16_t(m.c[col].r[row].u32 & 0xffffu);
	}

	inline int8_t scalar_i8(uint32_t col = 0, uint32_t row = 0) const
	{
		return int8_t(m.c[col].r[row].u32 & 0xffu);
	}

	inline uint8_t scalar_u8(uint32_t col = 0, uint32_t row = 0) const
	{
		return uint8_t(m.c[col].r[row].u32 & 0xffu);
	}

	inline float scalar_f16(uint32_t col = 0, uint32_t row = 0) const
	{
		return f16_to_f32(scalar_u16(col, row));
	}

	inline float scalar_f32(uint32_t col = 0, uint32_t row = 0) const
	{
		return m.c[col].r[row].f32;
	}

	inline int32_t scalar_i32(uint32_t col = 0, uint32_t row = 0) const
	{
		return m.c[col].r[row].i32;
	}

	inline double scalar_f64(uint32_t col = 0, uint32_t row = 0) const
	{
		return m.c[col].r[row].f64;
	}

	inline int64_t scalar_i64(uint32_t col = 0, uint32_t row = 0) const
	{
		return m.c[col].r[row].i64;
	}

	inline uint64_t scalar_u64(uint32_t col = 0, uint32_t row = 0) const
	{
		return m.c[col].r[row].u64;
	}

	// First (or only) column, i.e. the vector payload for non-matrix constants.
	inline const ConstantVector &vector() const
	{
		return m.c[0];
	}

	inline uint32_t vector_size() const
	{
		return m.c[0].vecsize;
	}

	inline uint32_t columns() const
	{
		return m.columns;
	}

	// Resets the payload to all-zero with the shape (columns/vecsize) of constant_type_.
	inline void make_null(const SPIRType &constant_type_)
	{
		m = {};
		m.columns = constant_type_.columns;
		for (auto &c : m.c)
			c.vecsize = constant_type_.vecsize;
	}

	// True if this is a plain (non-spec, non-composite) constant whose scalars are all zero.
	inline bool constant_is_null() const
	{
		if (specialization)
			return false;
		if (!subconstants.empty())
			return false;

		for (uint32_t col = 0; col < columns(); col++)
			for (uint32_t row = 0; row < vector_size(); row++)
				if (scalar_u64(col, row) != 0)
					return false;

		return true;
	}

	explicit SPIRConstant(uint32_t constant_type_)
	    : constant_type(constant_type_)
	{
	}

	SPIRConstant() = default;

	// Construct a composite constant from element IDs.
	SPIRConstant(TypeID constant_type_, const uint32_t *elements, uint32_t num_elements, bool specialized)
	    : constant_type(constant_type_)
	    , specialization(specialized)
	{
		subconstants.reserve(num_elements);
		for (uint32_t i = 0; i < num_elements; i++)
			subconstants.push_back(elements[i]);
		// NOTE(review): redundant — specialization was already set in the initializer list.
		specialization = specialized;
	}

	// Construct scalar (32-bit).
	SPIRConstant(TypeID constant_type_, uint32_t v0, bool specialized)
	    : constant_type(constant_type_)
	    , specialization(specialized)
	{
		m.c[0].r[0].u32 = v0;
		m.c[0].vecsize = 1;
		m.columns = 1;
	}

	// Construct scalar (64-bit).
	SPIRConstant(TypeID constant_type_, uint64_t v0, bool specialized)
	    : constant_type(constant_type_)
	    , specialization(specialized)
	{
		m.c[0].r[0].u64 = v0;
		m.c[0].vecsize = 1;
		m.columns = 1;
	}

	// Construct vectors and matrices.
	SPIRConstant(TypeID constant_type_, const SPIRConstant *const *vector_elements, uint32_t num_elements,
	             bool specialized)
	    : constant_type(constant_type_)
	    , specialization(specialized)
	{
		// If the first element is itself a vector, we are building a matrix.
		bool matrix = vector_elements[0]->m.c[0].vecsize > 1;

		if (matrix)
		{
			m.columns = num_elements;

			for (uint32_t i = 0; i < num_elements; i++)
			{
				m.c[i] = vector_elements[i]->m.c[0];
				if (vector_elements[i]->specialization)
					m.id[i] = vector_elements[i]->self;
			}
		}
		else
		{
			m.c[0].vecsize = num_elements;
			m.columns = 1;

			for (uint32_t i = 0; i < num_elements; i++)
			{
				m.c[0].r[i] = vector_elements[i]->m.c[0].r[0];
				if (vector_elements[i]->specialization)
					m.c[0].id[i] = vector_elements[i]->self;
			}
		}
	}

	TypeID constant_type = 0;
	ConstantMatrix m;

	// If this constant is a specialization constant (i.e. created with OpSpecConstant*).
	bool specialization = false;
	// If this constant is used as an array length which creates specialization restrictions on some backends.
	bool is_used_as_array_length = false;

	// If true, this is a LUT, and should always be declared in the outer scope.
	bool is_used_as_lut = false;

	// For composites which are constant arrays, etc.
	SmallVector<ConstantID> subconstants;

	// Non-Vulkan GLSL, HLSL and sometimes MSL emits defines for each specialization constant,
	// and uses them to initialize the constant. This allows the user
	// to still be able to specialize the value by supplying corresponding
	// preprocessor directives before compiling the shader.
	std::string specialization_constant_macro_name;

	SPIRV_CROSS_DECLARE_CLONE(SPIRConstant)
};
|
|
|
|
|
2019-04-02 09:19:03 +00:00
|
|
|
// Variants have a very specific allocation scheme.
// One object pool per IVariant subtype; Variant indexes into this by Types enum.
struct ObjectPoolGroup
{
	std::unique_ptr<ObjectPoolBase> pools[TypeCount];
};
|
|
|
|
|
2016-05-05 07:33:18 +00:00
|
|
|
class Variant
|
|
|
|
{
|
|
|
|
public:
|
2019-04-02 09:19:03 +00:00
|
|
|
explicit Variant(ObjectPoolGroup *group_)
|
|
|
|
: group(group_)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
~Variant()
|
|
|
|
{
|
|
|
|
if (holder)
|
2021-09-30 12:12:08 +00:00
|
|
|
group->pools[type]->deallocate_opaque(holder);
|
2019-04-02 09:19:03 +00:00
|
|
|
}
|
2018-10-05 09:30:57 +00:00
|
|
|
|
|
|
|
// Marking custom move constructor as noexcept is important.
|
|
|
|
Variant(Variant &&other) SPIRV_CROSS_NOEXCEPT
|
2016-05-05 07:33:18 +00:00
|
|
|
{
|
|
|
|
*this = std::move(other);
|
|
|
|
}
|
2018-10-05 09:30:57 +00:00
|
|
|
|
2019-04-02 09:19:03 +00:00
|
|
|
// We cannot copy from other variant without our own pool group.
|
|
|
|
// Have to explicitly copy.
|
|
|
|
Variant(const Variant &variant) = delete;
|
2018-10-05 09:30:57 +00:00
|
|
|
|
|
|
|
// Marking custom move constructor as noexcept is important.
|
|
|
|
Variant &operator=(Variant &&other) SPIRV_CROSS_NOEXCEPT
|
2016-05-05 07:33:18 +00:00
|
|
|
{
|
|
|
|
if (this != &other)
|
|
|
|
{
|
2019-04-02 09:19:03 +00:00
|
|
|
if (holder)
|
2021-09-30 12:12:08 +00:00
|
|
|
group->pools[type]->deallocate_opaque(holder);
|
2019-04-02 09:19:03 +00:00
|
|
|
holder = other.holder;
|
|
|
|
group = other.group;
|
2016-05-05 07:33:18 +00:00
|
|
|
type = other.type;
|
2018-10-05 09:30:57 +00:00
|
|
|
allow_type_rewrite = other.allow_type_rewrite;
|
2019-04-02 09:19:03 +00:00
|
|
|
|
|
|
|
other.holder = nullptr;
|
2016-05-05 07:33:18 +00:00
|
|
|
other.type = TypeNone;
|
|
|
|
}
|
|
|
|
return *this;
|
|
|
|
}
|
|
|
|
|
2018-10-05 09:30:57 +00:00
|
|
|
// This copy/clone should only be called in the Compiler constructor.
|
|
|
|
// If this is called inside ::compile(), we invalidate any references we took higher in the stack.
|
|
|
|
// This should never happen.
|
|
|
|
Variant &operator=(const Variant &other)
|
|
|
|
{
|
2019-04-09 11:07:30 +00:00
|
|
|
//#define SPIRV_CROSS_COPY_CONSTRUCTOR_SANITIZE
|
2018-10-05 09:30:57 +00:00
|
|
|
#ifdef SPIRV_CROSS_COPY_CONSTRUCTOR_SANITIZE
|
|
|
|
abort();
|
|
|
|
#endif
|
|
|
|
if (this != &other)
|
|
|
|
{
|
2019-04-02 09:19:03 +00:00
|
|
|
if (holder)
|
2021-09-30 12:12:08 +00:00
|
|
|
group->pools[type]->deallocate_opaque(holder);
|
2019-04-02 09:19:03 +00:00
|
|
|
|
2018-10-05 09:30:57 +00:00
|
|
|
if (other.holder)
|
2019-04-02 09:19:03 +00:00
|
|
|
holder = other.holder->clone(group->pools[other.type].get());
|
2019-04-09 13:25:11 +00:00
|
|
|
else
|
|
|
|
holder = nullptr;
|
2019-04-02 09:19:03 +00:00
|
|
|
|
2018-10-05 09:30:57 +00:00
|
|
|
type = other.type;
|
|
|
|
allow_type_rewrite = other.allow_type_rewrite;
|
|
|
|
}
|
|
|
|
return *this;
|
|
|
|
}
|
|
|
|
|
2019-04-02 09:19:03 +00:00
|
|
|
void set(IVariant *val, Types new_type)
|
2016-05-05 07:33:18 +00:00
|
|
|
{
|
2019-04-02 09:19:03 +00:00
|
|
|
if (holder)
|
2021-09-30 12:12:08 +00:00
|
|
|
group->pools[type]->deallocate_opaque(holder);
|
2019-04-09 13:25:11 +00:00
|
|
|
holder = nullptr;
|
|
|
|
|
2019-09-23 22:05:04 +00:00
|
|
|
if (!allow_type_rewrite && type != TypeNone && type != new_type)
|
2019-04-09 13:25:11 +00:00
|
|
|
{
|
|
|
|
if (val)
|
2021-09-30 12:12:08 +00:00
|
|
|
group->pools[new_type]->deallocate_opaque(val);
|
2016-12-12 21:33:22 +00:00
|
|
|
SPIRV_CROSS_THROW("Overwriting a variant with new type.");
|
2019-09-23 22:05:04 +00:00
|
|
|
}
|
2019-04-09 13:25:11 +00:00
|
|
|
|
|
|
|
holder = val;
|
2016-05-05 07:33:18 +00:00
|
|
|
type = new_type;
|
2018-05-11 08:14:20 +00:00
|
|
|
allow_type_rewrite = false;
|
2016-05-05 07:33:18 +00:00
|
|
|
}
|
|
|
|
|
2019-04-02 09:19:03 +00:00
|
|
|
template <typename T, typename... Ts>
|
|
|
|
T *allocate_and_set(Types new_type, Ts &&... ts)
|
|
|
|
{
|
|
|
|
T *val = static_cast<ObjectPool<T> &>(*group->pools[new_type]).allocate(std::forward<Ts>(ts)...);
|
|
|
|
set(val, new_type);
|
|
|
|
return val;
|
|
|
|
}
|
|
|
|
|
2016-05-05 07:33:18 +00:00
|
|
|
template <typename T>
|
|
|
|
T &get()
|
|
|
|
{
|
|
|
|
if (!holder)
|
2016-12-12 21:33:22 +00:00
|
|
|
SPIRV_CROSS_THROW("nullptr");
|
2019-01-10 08:49:33 +00:00
|
|
|
if (static_cast<Types>(T::type) != type)
|
2016-12-12 21:33:22 +00:00
|
|
|
SPIRV_CROSS_THROW("Bad cast");
|
2019-04-02 09:19:03 +00:00
|
|
|
return *static_cast<T *>(holder);
|
2016-05-05 07:33:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Const-qualified counterpart of get(). Same type checking and error behavior.
template <typename T>
const T &get() const
{
	if (!holder)
		SPIRV_CROSS_THROW("nullptr");
	if (static_cast<Types>(T::type) != type)
		SPIRV_CROSS_THROW("Bad cast");
	return *static_cast<const T *>(holder);
}
|
|
|
|
|
2019-01-10 08:49:33 +00:00
|
|
|
// Returns the tag of the currently held type (TypeNone when empty).
Types get_type() const
{
	return type;
}
|
2018-10-05 09:30:57 +00:00
|
|
|
|
2019-09-05 10:43:40 +00:00
|
|
|
// Returns the SPIR-V self ID of the held object, or ID(0) when empty.
ID get_id() const
{
	return holder ? holder->self : ID(0);
}
|
2018-10-05 09:30:57 +00:00
|
|
|
|
2016-05-05 07:33:18 +00:00
|
|
|
// True when no object is currently held.
bool empty() const
{
	return !holder;
}
|
2018-10-05 09:30:57 +00:00
|
|
|
|
2016-05-05 07:33:18 +00:00
|
|
|
// Returns the held object (if any) to its pool and resets the variant
// to the empty state.
void reset()
{
	if (holder)
		group->pools[type]->deallocate_opaque(holder);
	holder = nullptr;
	type = TypeNone;
}
|
|
|
|
|
2018-05-11 08:14:20 +00:00
|
|
|
// Permits the next set() to replace the held object with one of a different
// type. The permission is consumed (cleared) by set().
void set_allow_type_rewrite()
{
	allow_type_rewrite = true;
}
|
|
|
|
|
2016-05-05 07:33:18 +00:00
|
|
|
private:
	// Non-owning pointer to the pool group which owns all allocations
	// made through this variant.
	ObjectPoolGroup *group = nullptr;
	// Currently held object, allocated from group's pool for "type".
	// Null when the variant is empty.
	IVariant *holder = nullptr;
	// Tag identifying the dynamic type of holder.
	Types type = TypeNone;
	// One-shot flag allowing set() to change the held type; cleared by set().
	bool allow_type_rewrite = false;
};
|
|
|
|
|
|
|
|
// Free-function convenience wrapper for Variant::get<T>().
template <typename T>
T &variant_get(Variant &var)
{
	return var.get<T>();
}
|
|
|
|
|
|
|
|
// Const overload of variant_get(); forwards to Variant::get<T>() const.
template <typename T>
const T &variant_get(const Variant &var)
{
	return var.get<T>();
}
|
|
|
|
|
|
|
|
// Allocates and constructs a T inside the variant, replacing any held object,
// and returns a reference to the new object. The Types tag is taken from T::type.
template <typename T, typename... P>
T &variant_set(Variant &var, P &&... args)
{
	auto *ptr = var.allocate_and_set<T>(static_cast<Types>(T::type), std::forward<P>(args)...);
	return *ptr;
}
|
|
|
|
|
2018-11-22 10:55:57 +00:00
|
|
|
// Properties discovered while resolving an access chain, which later
// code generation needs to take into account.
struct AccessChainMeta
{
	// Physical (remapped) type ID of the storage; 0 when no remapping applies.
	uint32_t storage_physical_type = 0;
	// The accessed matrix must be transposed relative to its declared layout.
	bool need_transpose = false;
	// The result is stored with tight ("packed") layout rules.
	bool storage_is_packed = false;
	// The accessed storage carries the Invariant decoration.
	bool storage_is_invariant = false;
	// The chain went through a struct which was flattened into separate variables.
	bool flattened_struct = false;
	// RelaxedPrecision applies to the result of the chain.
	bool relaxed_precision = false;
	// NOTE(review): name suggests the chain accessed the Y component of a
	// mesh-shader position output — confirm against use sites.
	bool access_meshlet_position_y = false;
};
|
|
|
|
|
2019-07-18 11:34:47 +00:00
|
|
|
// SPIRV-Cross-specific decorations, tracked per ID/member alongside the
// regular SPIR-V decorations (see Meta::Decoration::Extended).
enum ExtendedDecorations
{
	// Marks if a buffer block is re-packed, i.e. member declaration might be subject to PhysicalTypeID remapping and padding.
	SPIRVCrossDecorationBufferBlockRepacked = 0,

	// A type in a buffer block might be declared with a different physical type than the logical type.
	// If this is not set, PhysicalTypeID == the SPIR-V type as declared.
	SPIRVCrossDecorationPhysicalTypeID,

	// Marks if the physical type is to be declared with tight packing rules, i.e. packed_floatN on MSL and friends.
	// If this is set, PhysicalTypeID might also be set. It can be set to same as logical type if all we're doing
	// is converting float3 to packed_float3 for example.
	// If this is marked on a struct, it means the struct itself must use only Packed types for all its members.
	SPIRVCrossDecorationPhysicalTypePacked,

	// The padding in bytes before declaring this struct member.
	// If used on a struct type, marks the target size of a struct.
	SPIRVCrossDecorationPaddingTarget,

	SPIRVCrossDecorationInterfaceMemberIndex,
	SPIRVCrossDecorationInterfaceOrigID,
	SPIRVCrossDecorationResourceIndexPrimary,
	// Used for decorations like resource indices for samplers when part of combined image samplers.
	// A variable might need to hold two resource indices in this case.
	SPIRVCrossDecorationResourceIndexSecondary,
	// Used for resource indices for multiplanar images when part of combined image samplers.
	SPIRVCrossDecorationResourceIndexTertiary,
	SPIRVCrossDecorationResourceIndexQuaternary,

	// Marks a buffer block for using explicit offsets (GLSL/HLSL).
	SPIRVCrossDecorationExplicitOffset,

	// Apply to a variable in the Input storage class; marks it as holding the base group passed to vkCmdDispatchBase(),
	// or the base vertex and instance indices passed to vkCmdDrawIndexed().
	// In MSL, this is used to adjust the WorkgroupId and GlobalInvocationId variables in compute shaders,
	// and to hold the BaseVertex and BaseInstance variables in vertex shaders.
	SPIRVCrossDecorationBuiltInDispatchBase,

	// Apply to a variable that is a function parameter; marks it as being a "dynamic"
	// combined image-sampler. In MSL, this is used when a function parameter might hold
	// either a regular combined image-sampler or one that has an attached sampler
	// Y'CbCr conversion.
	SPIRVCrossDecorationDynamicImageSampler,

	// Apply to a variable in the Input storage class; marks it as holding the size of the stage
	// input grid.
	// In MSL, this is used to hold the vertex and instance counts in a tessellation pipeline
	// vertex shader.
	SPIRVCrossDecorationBuiltInStageInputSize,

	// Apply to any access chain of a tessellation I/O variable; stores the type of the sub-object
	// that was chained to, as recorded in the input variable itself. This is used in case the pointer
	// is itself used as the base of an access chain, to calculate the original type of the sub-object
	// chained to, in case a swizzle needs to be applied. This should not happen normally with valid
	// SPIR-V, but the MSL backend can change the type of input variables, necessitating the
	// addition of swizzles to keep the generated code compiling.
	SPIRVCrossDecorationTessIOOriginalInputTypeID,

	// Apply to any access chain of an interface variable used with pull-model interpolation, where the variable is a
	// vector but the resulting pointer is a scalar; stores the component index that is to be accessed by the chain.
	// This is used when emitting calls to interpolation functions on the chain in MSL: in this case, the component
	// must be applied to the result, since pull-model interpolants in MSL cannot be swizzled directly, but the
	// results of interpolation can.
	SPIRVCrossDecorationInterpolantComponentExpr,

	// Apply to any struct type that is used in the Workgroup storage class.
	// This causes matrices in MSL prior to Metal 3.0 to be emitted using a special
	// class that is convertible to the standard matrix type, to work around the
	// lack of constructors in the 'threadgroup' address space.
	SPIRVCrossDecorationWorkgroupStruct,

	// Number of extended decorations; used to size per-ID value storage.
	SPIRVCrossDecorationCount
};
|
|
|
|
|
2016-05-05 07:33:18 +00:00
|
|
|
// Per-ID metadata: name and decoration state for an ID and its struct members.
struct Meta
{
	// Decoration state for a single ID or struct member.
	struct Decoration
	{
		// Declared name of the ID (OpName).
		std::string alias;
		// Alternative name used by backends in place of alias when set
		// (see qualified_alias_explicit_override).
		std::string qualified_alias;
		// HLSL semantic string (SPV_GOOGLE_hlsl_functionality1).
		std::string hlsl_semantic;
		// User-specified type name override.
		std::string user_type;
		// One bit per spv::Decoration which is set on this ID/member.
		Bitset decoration_flags;
		// BuiltInMax means "not a builtin".
		spv::BuiltIn builtin_type = spv::BuiltInMax;
		uint32_t location = 0;
		uint32_t component = 0;
		uint32_t set = 0;
		uint32_t binding = 0;
		uint32_t offset = 0;
		uint32_t xfb_buffer = 0;
		uint32_t xfb_stride = 0;
		uint32_t stream = 0;
		uint32_t array_stride = 0;
		uint32_t matrix_stride = 0;
		uint32_t input_attachment = 0;
		uint32_t spec_id = 0;
		uint32_t index = 0;
		// FPRoundingModeMax means "no rounding mode decoration".
		spv::FPRoundingMode fp_rounding_mode = spv::FPRoundingModeMax;
		bool builtin = false;
		// When set, qualified_alias was assigned explicitly and should not be
		// regenerated automatically.
		bool qualified_alias_explicit_override = false;

		// Flags and values for SPIRV-Cross-specific decorations
		// (see ExtendedDecorations).
		struct Extended
		{
			Extended()
			{
				// MSVC 2013 workaround to init like this.
				for (auto &v : values)
					v = 0;
			}

			// Which extended decorations are set.
			Bitset flags;
			// One value slot per extended decoration.
			uint32_t values[SPIRVCrossDecorationCount];
		} extended;
	};

	// Decoration state for the ID itself.
	Decoration decoration;

	// Intentionally not a SmallVector. Decoration is large and somewhat rare.
	Vector<Decoration> members;

	// Maps a decoration (as uint32_t) to a word offset within the SPIR-V binary;
	// presumably used to patch decoration arguments in-place — confirm at use sites.
	std::unordered_map<uint32_t, uint32_t> decoration_word_offset;

	// For SPV_GOOGLE_hlsl_functionality1.
	bool hlsl_is_magic_counter_buffer = false;
	// ID for the sibling counter buffer.
	uint32_t hlsl_magic_counter_buffer = 0;
};
|
2016-09-20 08:17:41 +00:00
|
|
|
|
|
|
|
// A user callback that remaps the type of any variable.
// var_name is the declared name of the variable.
// name_of_type is the textual name of the type which will be used in the code
// unless written to by the callback.
using VariableTypeRemapCallback =
    std::function<void(const SPIRType &type, const std::string &var_name, std::string &name_of_type)>;
|
2017-01-11 14:57:05 +00:00
|
|
|
|
2018-02-23 13:13:46 +00:00
|
|
|
// Incremental 64-bit hasher over 32-bit words.
// Variation on FNV-1a: multiplies the state by the FNV prime first,
// then XORs in the new word.
class Hasher
{
public:
	// Folds one 32-bit word into the running hash.
	inline void u32(uint32_t value)
	{
		state = (state * 0x100000001b3ull) ^ value;
	}

	// Returns the current hash value.
	inline uint64_t get() const
	{
		return state;
	}

private:
	// Seeded with the 64-bit FNV offset basis.
	uint64_t state = 0xcbf29ce484222325ull;
};
|
2018-03-07 09:21:25 +00:00
|
|
|
|
|
|
|
static inline bool type_is_floating_point(const SPIRType &type)
|
|
|
|
{
|
|
|
|
return type.basetype == SPIRType::Half || type.basetype == SPIRType::Float || type.basetype == SPIRType::Double;
|
|
|
|
}
|
2018-09-04 21:08:22 +00:00
|
|
|
|
|
|
|
static inline bool type_is_integral(const SPIRType &type)
|
|
|
|
{
|
2018-11-01 22:20:07 +00:00
|
|
|
return type.basetype == SPIRType::SByte || type.basetype == SPIRType::UByte || type.basetype == SPIRType::Short ||
|
|
|
|
type.basetype == SPIRType::UShort || type.basetype == SPIRType::Int || type.basetype == SPIRType::UInt ||
|
|
|
|
type.basetype == SPIRType::Int64 || type.basetype == SPIRType::UInt64;
|
2018-09-04 21:08:22 +00:00
|
|
|
}
|
2019-01-30 13:49:55 +00:00
|
|
|
|
|
|
|
// Maps a bit width (8, 16, 32 or 64) to the corresponding signed integer
// base type. Throws for any other width.
static inline SPIRType::BaseType to_signed_basetype(uint32_t width)
{
	switch (width)
	{
	case 8:
		return SPIRType::SByte;
	case 16:
		return SPIRType::Short;
	case 32:
		return SPIRType::Int;
	case 64:
		return SPIRType::Int64;
	default:
		SPIRV_CROSS_THROW("Invalid bit width.");
	}
}
|
|
|
|
|
|
|
|
// Maps a bit width (8, 16, 32 or 64) to the corresponding unsigned integer
// base type. Throws for any other width.
static inline SPIRType::BaseType to_unsigned_basetype(uint32_t width)
{
	switch (width)
	{
	case 8:
		return SPIRType::UByte;
	case 16:
		return SPIRType::UShort;
	case 32:
		return SPIRType::UInt;
	case 64:
		return SPIRType::UInt64;
	default:
		SPIRV_CROSS_THROW("Invalid bit width.");
	}
}
|
|
|
|
|
|
|
|
// Returns true if an arithmetic operation does not change behavior depending on signedness.
// In two's complement, add/sub/mul, bitwise ops, equality and left shift produce
// the same bit pattern regardless of whether operands are treated as signed or unsigned.
static inline bool opcode_is_sign_invariant(spv::Op opcode)
{
	switch (opcode)
	{
	case spv::OpIEqual:
	case spv::OpINotEqual:
	case spv::OpISub:
	case spv::OpIAdd:
	case spv::OpIMul:
	case spv::OpShiftLeftLogical:
	case spv::OpBitwiseOr:
	case spv::OpBitwiseXor:
	case spv::OpBitwiseAnd:
		return true;

	default:
		return false;
	}
}
|
2020-01-09 10:18:14 +00:00
|
|
|
|
2022-10-31 12:05:56 +00:00
|
|
|
// Returns true if the opcode may be evaluated at a wider integer width after
// implicitly promoting its operands (with appropriate sign/zero extension),
// without needing an explicit narrowing cast first — assumption based on the
// operation set; confirm against call sites.
static inline bool opcode_can_promote_integer_implicitly(spv::Op opcode)
{
	switch (opcode)
	{
	case spv::OpSNegate:
	case spv::OpNot:
	case spv::OpBitwiseAnd:
	case spv::OpBitwiseOr:
	case spv::OpBitwiseXor:
	case spv::OpShiftLeftLogical:
	case spv::OpShiftRightLogical:
	case spv::OpShiftRightArithmetic:
	case spv::OpIAdd:
	case spv::OpISub:
	case spv::OpIMul:
	case spv::OpSDiv:
	case spv::OpUDiv:
	case spv::OpSRem:
	case spv::OpUMod:
	case spv::OpSMod:
		return true;

	default:
		return false;
	}
}
|
|
|
|
|
2020-01-09 10:18:14 +00:00
|
|
|
// A (descriptor set, binding) pair, usable as a key in ordered and
// unordered containers (see InternalHasher).
struct SetBindingPair
{
	uint32_t desc_set;
	uint32_t binding;

	// Equal when both coordinates match.
	inline bool operator==(const SetBindingPair &other) const
	{
		return desc_set == other.desc_set && binding == other.binding;
	}

	// Lexicographic order: descriptor set first, then binding.
	inline bool operator<(const SetBindingPair &other) const
	{
		if (desc_set != other.desc_set)
			return desc_set < other.desc_set;
		return binding < other.binding;
	}
};
|
|
|
|
|
2021-03-25 17:08:49 +00:00
|
|
|
// A (location, component) pair identifying a shader interface slot,
// usable as a key in ordered and unordered containers (see InternalHasher).
struct LocationComponentPair
{
	uint32_t location;
	uint32_t component;

	// Equal when both coordinates match.
	inline bool operator==(const LocationComponentPair &other) const
	{
		return location == other.location && component == other.component;
	}

	// Lexicographic order: location first, then component.
	inline bool operator<(const LocationComponentPair &other) const
	{
		if (location != other.location)
			return location < other.location;
		return component < other.component;
	}
};
|
|
|
|
|
2020-01-09 10:18:14 +00:00
|
|
|
struct StageSetBinding
|
|
|
|
{
|
|
|
|
spv::ExecutionModel model;
|
|
|
|
uint32_t desc_set;
|
|
|
|
uint32_t binding;
|
|
|
|
|
|
|
|
inline bool operator==(const StageSetBinding &other) const
|
|
|
|
{
|
|
|
|
return model == other.model && desc_set == other.desc_set && binding == other.binding;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
struct InternalHasher
|
|
|
|
{
|
|
|
|
inline size_t operator()(const SetBindingPair &value) const
|
|
|
|
{
|
|
|
|
// Quality of hash doesn't really matter here.
|
|
|
|
auto hash_set = std::hash<uint32_t>()(value.desc_set);
|
|
|
|
auto hash_binding = std::hash<uint32_t>()(value.binding);
|
|
|
|
return (hash_set * 0x10001b31) ^ hash_binding;
|
|
|
|
}
|
|
|
|
|
2021-03-25 17:08:49 +00:00
|
|
|
inline size_t operator()(const LocationComponentPair &value) const
|
|
|
|
{
|
|
|
|
// Quality of hash doesn't really matter here.
|
|
|
|
auto hash_set = std::hash<uint32_t>()(value.location);
|
|
|
|
auto hash_binding = std::hash<uint32_t>()(value.component);
|
|
|
|
return (hash_set * 0x10001b31) ^ hash_binding;
|
|
|
|
}
|
|
|
|
|
2020-01-09 10:18:14 +00:00
|
|
|
inline size_t operator()(const StageSetBinding &value) const
|
|
|
|
{
|
|
|
|
// Quality of hash doesn't really matter here.
|
|
|
|
auto hash_model = std::hash<uint32_t>()(value.model);
|
|
|
|
auto hash_set = std::hash<uint32_t>()(value.desc_set);
|
|
|
|
auto tmp_hash = (hash_model * 0x10001b31) ^ hash_set;
|
|
|
|
return (tmp_hash * 0x10001b31) ^ value.binding;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// Special constant used in a {MSL,HLSL}ResourceBinding desc_set
// element to indicate the bindings for the push constants.
// (~0u == UINT32_MAX acts as an out-of-band sentinel.)
static const uint32_t ResourceBindingPushConstantDescriptorSet = ~(0u);

// Special constant used in a {MSL,HLSL}ResourceBinding binding
// element to indicate the bindings for the push constants.
static const uint32_t ResourceBindingPushConstantBinding = 0;
|
2019-04-02 09:19:03 +00:00
|
|
|
} // namespace SPIRV_CROSS_NAMESPACE
|
2016-05-05 07:33:18 +00:00
|
|
|
|
2019-09-05 10:43:40 +00:00
|
|
|
namespace std
{
// Make TypedID usable directly as a key in unordered containers.
template <SPIRV_CROSS_NAMESPACE::Types type>
struct hash<SPIRV_CROSS_NAMESPACE::TypedID<type>>
{
	size_t operator()(const SPIRV_CROSS_NAMESPACE::TypedID<type> &value) const
	{
		// TypedID converts to uint32_t; hash the raw ID value.
		return std::hash<uint32_t>()(value);
	}
};
} // namespace std
|
|
|
|
|
2016-05-05 07:33:18 +00:00
|
|
|
#endif
|