Merge pull request #972 from amaury-ml/mat4/inverse

glm-aarch64: Add neon's mat4 inverse #972
Christophe 2019-11-16 15:11:01 +01:00, committed by GitHub
commit 8a34283c87
3 changed files with 285 additions and 12 deletions


@@ -103,17 +103,10 @@ namespace glm {
		auto MulRow = [&](int l) {
			float32x4_t const SrcA = m2[l].data;
-#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
-			float32x4_t r = vmulq_laneq_f32(m1[0].data, SrcA, 0);
-			r = vaddq_f32(r, vmulq_laneq_f32(m1[1].data, SrcA, 1));
-			r = vaddq_f32(r, vmulq_laneq_f32(m1[2].data, SrcA, 2));
-			r = vaddq_f32(r, vmulq_laneq_f32(m1[3].data, SrcA, 3));
-#else
-			float32x4_t r = vmulq_f32(m1[0].data, vdupq_n_f32(vgetq_lane_f32(SrcA, 0)));
-			r = vaddq_f32(r, vmulq_f32(m1[1].data, vdupq_n_f32(vgetq_lane_f32(SrcA, 1))));
-			r = vaddq_f32(r, vmulq_f32(m1[2].data, vdupq_n_f32(vgetq_lane_f32(SrcA, 2))));
-			r = vaddq_f32(r, vmulq_f32(m1[3].data, vdupq_n_f32(vgetq_lane_f32(SrcA, 3))));
-#endif
+			float32x4_t r = neon::mul_lane(m1[0].data, SrcA, 0);
+			r = neon::madd_lane(r, m1[1].data, SrcA, 1);
+			r = neon::madd_lane(r, m1[2].data, SrcA, 2);
+			r = neon::madd_lane(r, m1[3].data, SrcA, 3);
			return r;
		};
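Note: in scalar terms the rewritten MulRow forms one output column of the product as a linear combination of m1's columns (glm is column-major, despite the lambda's name), since neon::mul_lane(a, b, i) is a * b[i] broadcast to every lane and neon::madd_lane(acc, a, b, i) is acc + a * b[i], per glm/simd/neon.h below. A rough scalar equivalent, purely illustrative (MulRowScalar is not part of the PR):

	#include <glm/glm.hpp>

	// One output column of the mat4 product:
	// r = m1[0]*SrcA[0] + m1[1]*SrcA[1] + m1[2]*SrcA[2] + m1[3]*SrcA[3]
	glm::vec4 MulRowScalar(glm::mat4 const& m1, glm::mat4 const& m2, int l)
	{
		glm::vec4 const SrcA = m2[l];
		glm::vec4 r = m1[0] * SrcA[0]; // neon::mul_lane(m1[0].data, SrcA, 0)
		r += m1[1] * SrcA[1];          // neon::madd_lane(r, m1[1].data, SrcA, 1)
		r += m1[2] * SrcA[2];
		r += m1[3] * SrcA[3];
		return r;
	}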
@@ -127,5 +120,130 @@ namespace glm {
		return Result;
	}
#endif // CXX11

	template<qualifier Q>
	struct detail::compute_inverse<4, 4, float, Q, true>
	{
		GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& m)
		{
			float32x4_t const& m0 = m[0].data;
			float32x4_t const& m1 = m[1].data;
			float32x4_t const& m2 = m[2].data;
			float32x4_t const& m3 = m[3].data;

			// m[2][2] * m[3][3] - m[3][2] * m[2][3];
			// m[2][2] * m[3][3] - m[3][2] * m[2][3];
			// m[1][2] * m[3][3] - m[3][2] * m[1][3];
			// m[1][2] * m[2][3] - m[2][2] * m[1][3];
			float32x4_t Fac0;
			{
				float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 2), neon::dup_lane(m1, 2));
				float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 3), 3, m2, 3);
				float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 2), 3, m2, 2);
				float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 3), neon::dup_lane(m1, 3));
				Fac0 = w0 * w1 - w2 * w3;
			}

			// m[2][1] * m[3][3] - m[3][1] * m[2][3];
			// m[2][1] * m[3][3] - m[3][1] * m[2][3];
			// m[1][1] * m[3][3] - m[3][1] * m[1][3];
			// m[1][1] * m[2][3] - m[2][1] * m[1][3];
			float32x4_t Fac1;
			{
				float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 1), neon::dup_lane(m1, 1));
				float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 3), 3, m2, 3);
				float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 1), 3, m2, 1);
				float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 3), neon::dup_lane(m1, 3));
				Fac1 = w0 * w1 - w2 * w3;
			}

			// m[2][1] * m[3][2] - m[3][1] * m[2][2];
			// m[2][1] * m[3][2] - m[3][1] * m[2][2];
			// m[1][1] * m[3][2] - m[3][1] * m[1][2];
			// m[1][1] * m[2][2] - m[2][1] * m[1][2];
			float32x4_t Fac2;
			{
				float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 1), neon::dup_lane(m1, 1));
				float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 2), 3, m2, 2);
				float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 1), 3, m2, 1);
				float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 2), neon::dup_lane(m1, 2));
				Fac2 = w0 * w1 - w2 * w3;
			}

			// m[2][0] * m[3][3] - m[3][0] * m[2][3];
			// m[2][0] * m[3][3] - m[3][0] * m[2][3];
			// m[1][0] * m[3][3] - m[3][0] * m[1][3];
			// m[1][0] * m[2][3] - m[2][0] * m[1][3];
			float32x4_t Fac3;
			{
				float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 0), neon::dup_lane(m1, 0));
				float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 3), 3, m2, 3);
				float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 0), 3, m2, 0);
				float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 3), neon::dup_lane(m1, 3));
				Fac3 = w0 * w1 - w2 * w3;
			}

			// m[2][0] * m[3][2] - m[3][0] * m[2][2];
			// m[2][0] * m[3][2] - m[3][0] * m[2][2];
			// m[1][0] * m[3][2] - m[3][0] * m[1][2];
			// m[1][0] * m[2][2] - m[2][0] * m[1][2];
			float32x4_t Fac4;
			{
				float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 0), neon::dup_lane(m1, 0));
				float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 2), 3, m2, 2);
				float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 0), 3, m2, 0);
				float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 2), neon::dup_lane(m1, 2));
				Fac4 = w0 * w1 - w2 * w3;
			}

			// m[2][0] * m[3][1] - m[3][0] * m[2][1];
			// m[2][0] * m[3][1] - m[3][0] * m[2][1];
			// m[1][0] * m[3][1] - m[3][0] * m[1][1];
			// m[1][0] * m[2][1] - m[2][0] * m[1][1];
			float32x4_t Fac5;
			{
				float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 0), neon::dup_lane(m1, 0));
				float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 1), 3, m2, 1);
				float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 0), 3, m2, 0);
				float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 1), neon::dup_lane(m1, 1));
				Fac5 = w0 * w1 - w2 * w3;
			}

			float32x4_t Vec0 = neon::copy_lane(neon::dupq_lane(m0, 0), 0, m1, 0); // (m[1][0], m[0][0], m[0][0], m[0][0]);
			float32x4_t Vec1 = neon::copy_lane(neon::dupq_lane(m0, 1), 0, m1, 1); // (m[1][1], m[0][1], m[0][1], m[0][1]);
			float32x4_t Vec2 = neon::copy_lane(neon::dupq_lane(m0, 2), 0, m1, 2); // (m[1][2], m[0][2], m[0][2], m[0][2]);
			float32x4_t Vec3 = neon::copy_lane(neon::dupq_lane(m0, 3), 0, m1, 3); // (m[1][3], m[0][3], m[0][3], m[0][3]);

			float32x4_t Inv0 = Vec1 * Fac0 - Vec2 * Fac1 + Vec3 * Fac2;
			float32x4_t Inv1 = Vec0 * Fac0 - Vec2 * Fac3 + Vec3 * Fac4;
			float32x4_t Inv2 = Vec0 * Fac1 - Vec1 * Fac3 + Vec3 * Fac5;
			float32x4_t Inv3 = Vec0 * Fac2 - Vec1 * Fac4 + Vec2 * Fac5;

			float32x4_t r0 = float32x4_t{-1, +1, -1, +1} * Inv0;
			float32x4_t r1 = float32x4_t{+1, -1, +1, -1} * Inv1;
			float32x4_t r2 = float32x4_t{-1, +1, -1, +1} * Inv2;
			float32x4_t r3 = float32x4_t{+1, -1, +1, -1} * Inv3;

			float32x4_t det = neon::mul_lane(r0, m0, 0);
			det = neon::madd_lane(det, r1, m0, 1);
			det = neon::madd_lane(det, r2, m0, 2);
			det = neon::madd_lane(det, r3, m0, 3);
			float32x4_t rdet = vdupq_n_f32(1 / vgetq_lane_f32(det, 0));

			mat<4, 4, float, Q> r;
			r[0].data = vmulq_f32(r0, rdet);
			r[1].data = vmulq_f32(r1, rdet);
			r[2].data = vmulq_f32(r2, rdet);
			r[3].data = vmulq_f32(r3, rdet);
			return r;
		}
	};
}//namespace glm
#endif
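Note: the six Fac registers pack, four lanes at a time, the same 2x2 sub-determinants that glm's generic compute_inverse builds one by one; r0..r3 are the signed cofactor columns, and lane 0 of det is the dot product of column 0 of m with the first row of those cofactors. The sign masks appear to be the global negation of the scalar version's, which is harmless: the determinant computed from the same registers flips sign too, and the two cancel in r * rdet. A scalar sketch of Fac0 following the comments in the diff (illustrative only, not part of the PR):

	#include <glm/glm.hpp>

	// The four lanes of Fac0, computed scalarly; lanes 0 and 1 are intentionally equal.
	glm::vec4 fac0_scalar(glm::mat4 const& m)
	{
		return glm::vec4(
			m[2][2] * m[3][3] - m[3][2] * m[2][3],  // lane 0
			m[2][2] * m[3][3] - m[3][2] * m[2][3],  // lane 1
			m[1][2] * m[3][3] - m[3][2] * m[1][3],  // lane 2
			m[1][2] * m[2][3] - m[2][2] * m[1][3]); // lane 3
	}

	// The NEON code produces the same lanes with shuffles:
	//   w0 = (m2[2], m2[2], m1[2], m1[2])   vcombine_f32 of two 64-bit broadcasts
	//   w1 = (m3[3], m3[3], m3[3], m2[3])   dupq_lane broadcast, lane 3 patched via copy_lane
	//   w2 = (m3[2], m3[2], m3[2], m2[2])
	//   w3 = (m2[3], m2[3], m1[3], m1[3])
	// then Fac0 = w0 * w1 - w2 * w3, element-wise via GCC/Clang vector extensions.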

glm/simd/neon.h (new file, +155 lines)

@@ -0,0 +1,155 @@
/// @ref simd_neon
/// @file glm/simd/neon.h
#pragma once
#if GLM_ARCH & GLM_ARCH_NEON_BIT
#include <arm_neon.h>
#include <assert.h> // for the unreachable-lane asserts below

namespace glm {
	namespace neon {
		static float32x4_t dupq_lane(float32x4_t vsrc, int lane) {
			switch(lane) {
#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
				case 0: return vdupq_laneq_f32(vsrc, 0);
				case 1: return vdupq_laneq_f32(vsrc, 1);
				case 2: return vdupq_laneq_f32(vsrc, 2);
				case 3: return vdupq_laneq_f32(vsrc, 3);
#else
				case 0: return vdupq_n_f32(vgetq_lane_f32(vsrc, 0));
				case 1: return vdupq_n_f32(vgetq_lane_f32(vsrc, 1));
				case 2: return vdupq_n_f32(vgetq_lane_f32(vsrc, 2));
				case 3: return vdupq_n_f32(vgetq_lane_f32(vsrc, 3));
#endif
			}
			assert(!"Unreachable code executed!");
			return vdupq_n_f32(0.0f);
		}
		static float32x2_t dup_lane(float32x4_t vsrc, int lane) {
			switch(lane) {
#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
				case 0: return vdup_laneq_f32(vsrc, 0);
				case 1: return vdup_laneq_f32(vsrc, 1);
				case 2: return vdup_laneq_f32(vsrc, 2);
				case 3: return vdup_laneq_f32(vsrc, 3);
#else
				case 0: return vdup_n_f32(vgetq_lane_f32(vsrc, 0));
				case 1: return vdup_n_f32(vgetq_lane_f32(vsrc, 1));
				case 2: return vdup_n_f32(vgetq_lane_f32(vsrc, 2));
				case 3: return vdup_n_f32(vgetq_lane_f32(vsrc, 3));
#endif
			}
			assert(!"Unreachable code executed!");
			return vdup_n_f32(0.0f);
		}
		static float32x4_t copy_lane(float32x4_t vdst, int dlane, float32x4_t vsrc, int slane) {
#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
			switch(dlane) {
				case 0:
					switch(slane) {
						case 0: return vcopyq_laneq_f32(vdst, 0, vsrc, 0);
						case 1: return vcopyq_laneq_f32(vdst, 0, vsrc, 1);
						case 2: return vcopyq_laneq_f32(vdst, 0, vsrc, 2);
						case 3: return vcopyq_laneq_f32(vdst, 0, vsrc, 3);
					}
					assert(!"Unreachable code executed!");
				case 1:
					switch(slane) {
						case 0: return vcopyq_laneq_f32(vdst, 1, vsrc, 0);
						case 1: return vcopyq_laneq_f32(vdst, 1, vsrc, 1);
						case 2: return vcopyq_laneq_f32(vdst, 1, vsrc, 2);
						case 3: return vcopyq_laneq_f32(vdst, 1, vsrc, 3);
					}
					assert(!"Unreachable code executed!");
				case 2:
					switch(slane) {
						case 0: return vcopyq_laneq_f32(vdst, 2, vsrc, 0);
						case 1: return vcopyq_laneq_f32(vdst, 2, vsrc, 1);
						case 2: return vcopyq_laneq_f32(vdst, 2, vsrc, 2);
						case 3: return vcopyq_laneq_f32(vdst, 2, vsrc, 3);
					}
					assert(!"Unreachable code executed!");
				case 3:
					switch(slane) {
						case 0: return vcopyq_laneq_f32(vdst, 3, vsrc, 0);
						case 1: return vcopyq_laneq_f32(vdst, 3, vsrc, 1);
						case 2: return vcopyq_laneq_f32(vdst, 3, vsrc, 2);
						case 3: return vcopyq_laneq_f32(vdst, 3, vsrc, 3);
					}
					assert(!"Unreachable code executed!");
			}
#else
			float l;
			switch(slane) {
				case 0: l = vgetq_lane_f32(vsrc, 0); break;
				case 1: l = vgetq_lane_f32(vsrc, 1); break;
				case 2: l = vgetq_lane_f32(vsrc, 2); break;
				case 3: l = vgetq_lane_f32(vsrc, 3); break;
				default:
					assert(!"Unreachable code executed!");
			}
			switch(dlane) {
				case 0: return vsetq_lane_f32(l, vdst, 0);
				case 1: return vsetq_lane_f32(l, vdst, 1);
				case 2: return vsetq_lane_f32(l, vdst, 2);
				case 3: return vsetq_lane_f32(l, vdst, 3);
			}
#endif
			assert(!"Unreachable code executed!");
			return vdupq_n_f32(0.0f);
		}
		static float32x4_t mul_lane(float32x4_t v, float32x4_t vlane, int lane) {
#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
			switch(lane) {
				case 0: return vmulq_laneq_f32(v, vlane, 0);
				case 1: return vmulq_laneq_f32(v, vlane, 1);
				case 2: return vmulq_laneq_f32(v, vlane, 2);
				case 3: return vmulq_laneq_f32(v, vlane, 3);
				default:
					assert(!"Unreachable code executed!");
			}
			assert(!"Unreachable code executed!");
			return vdupq_n_f32(0.0f);
#else
			return vmulq_f32(v, dupq_lane(vlane, lane));
#endif
		}
		static float32x4_t madd_lane(float32x4_t acc, float32x4_t v, float32x4_t vlane, int lane) {
#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
#ifdef GLM_CONFIG_FORCE_FMA
// Broadcast lane L across a full 128-bit register (dupq_lane, not dup_lane):
// the ".4s" fmla reads all four lanes of %2, so a 64-bit broadcast would leave
// the upper two lanes without the multiplicand, corrupting half the result.
#	define FMADD_LANE(acc, x, y, L) do { asm volatile ("fmla %0.4s, %1.4s, %2.4s" : "+w"(acc) : "w"(x), "w"(dupq_lane(y, L))); } while(0)
#else
#	define FMADD_LANE(acc, x, y, L) do { acc = vmlaq_laneq_f32(acc, x, y, L); } while(0)
#endif
			switch(lane) {
				case 0:
					FMADD_LANE(acc, v, vlane, 0);
					return acc;
				case 1:
					FMADD_LANE(acc, v, vlane, 1);
					return acc;
				case 2:
					FMADD_LANE(acc, v, vlane, 2);
					return acc;
				case 3:
					FMADD_LANE(acc, v, vlane, 3);
					return acc;
				default:
					assert(!"Unreachable code executed!");
			}
			assert(!"Unreachable code executed!");
			return vdupq_n_f32(0.0f);
#	undef FMADD_LANE
#else
			return vaddq_f32(acc, vmulq_f32(v, dupq_lane(vlane, lane)));
#endif
		}
	} //namespace neon
} // namespace glm
#endif // GLM_ARCH & GLM_ARCH_NEON_BIT
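Note: these helpers exist because the lane index reaches them as a run-time int while the underlying intrinsics (vdupq_laneq_f32, vcopyq_laneq_f32, and friends) require compile-time immediates, hence the switch tables; the ARMv7 fallbacks spell out the semantics directly. A hypothetical smoke test (not part of the PR) showing what each helper returns, assuming a NEON-enabled GLM build:

	#include <arm_neon.h>
	#include <glm/glm.hpp> // pulls in glm/simd/neon.h on a NEON-enabled build

	bool neon_helpers_smoke_test()
	{
		float const in[4] = {1.f, 2.f, 3.f, 4.f};
		float32x4_t a = vld1q_f32(in);
		float32x4_t b = glm::neon::dupq_lane(a, 2);       // (3, 3, 3, 3)
		float32x4_t c = glm::neon::copy_lane(a, 0, a, 3); // (4, 2, 3, 4)
		float32x4_t d = glm::neon::madd_lane(b, a, a, 1); // b + a * a[1] = (5, 7, 9, 11)
		float out[4];
		vst1q_f32(out, d);
		return vgetq_lane_f32(c, 0) == 4.f && out[0] == 5.f && out[3] == 11.f;
	}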


@@ -364,7 +364,7 @@
#elif GLM_ARCH & GLM_ARCH_SSE2_BIT
#	include <emmintrin.h>
#elif GLM_ARCH & GLM_ARCH_NEON_BIT
-#	include <arm_neon.h>
+#	include "neon.h"
#endif//GLM_ARCH

#if GLM_ARCH & GLM_ARCH_SSE2_BIT
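
Note: nothing else is needed to pick the new path up; glm::inverse dispatches on the Aligned template argument (the trailing `true` in the specialization above). A hedged usage sketch, assuming the usual GLM switches (GLM_FORCE_INTRINSICS and aligned gentypes are pre-existing GLM options, not something this PR adds):

	#define GLM_FORCE_INTRINSICS               // enable SIMD code paths
	#define GLM_FORCE_DEFAULT_ALIGNED_GENTYPES // default mat4 to the aligned qualifier
	#include <glm/glm.hpp>

	glm::mat4 invert(glm::mat4 const& m)
	{
		return glm::inverse(m); // hits compute_inverse<4, 4, float, Q, true> on ARM/AArch64
	}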