mirror of https://sourceware.org/git/glibc.git (synced 2024-12-27 05:00:15 +00:00)
AArch64: Improve codegen in AdvSIMD pow
Remove spurious ADRP. Improve memory access by shuffling constants and using more indexed MLAs. A few more optimisations with no impact on accuracy:
- force FMA contraction
- switch from shift-aided rint to the rint instruction

Between 1 and 5% throughput improvement on Neoverse V1, depending on the benchmark.
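The indexed-MLA trick shows up throughout the diff below: pairs of scalar constants (log_c1/log_c3, ln2_lo/ln2_hi, ln2_lo_n/ln2_hi_n, inv_ln2_n/exp_c2) are declared adjacently in the data struct, so a single vld1q_f64 pulls both into one vector register and vfmaq_laneq_f64 (FMLA by element) selects a lane, replacing two full-width constant loads with one. A minimal standalone sketch of that layout, using hypothetical names (c0..c3, poly_eval) that are not from the commit:

#include <arm_neon.h>
#include <stdio.h>

/* Hypothetical coefficients for p(x) = c0 + c1*x + c2*x^2 + c3*x^3.
   The scalar coefficients c1 and c3 are adjacent on purpose: one LDR
   loads both, and indexed FMLAs pick out the lane they need.  */
static const struct
{
  float64x2_t c0, c2;
  double c1, c3;
} consts = {
  .c0 = { 1.0, 1.0 },
  .c2 = { 3.0, 3.0 },
  .c1 = 2.0,
  .c3 = 4.0,
};

static float64x2_t
poly_eval (float64x2_t x)
{
  float64x2_t odd = vld1q_f64 (&consts.c1); /* { c1, c3 } in one load.  */
  float64x2_t p01 = vfmaq_laneq_f64 (consts.c0, x, odd, 0); /* c0 + c1*x.  */
  float64x2_t p23 = vfmaq_laneq_f64 (consts.c2, x, odd, 1); /* c2 + c3*x.  */
  float64x2_t x2 = vmulq_f64 (x, x);
  return vfmaq_f64 (p01, x2, p23); /* (c0 + c1*x) + x^2*(c2 + c3*x).  */
}

int
main (void)
{
  float64x2_t r = poly_eval (vdupq_n_f64 (2.0));
  /* 1 + 2*2 + 3*4 + 4*8 = 49 in both lanes.  */
  printf ("%g %g\n", vgetq_lane_f64 (r, 0), vgetq_lane_f64 (r, 1));
  return 0;
}

This mirrors how the diff builds odd_coeffs from &d->log_c1 and exp_consts from &d->inv_ln2_n.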
parent b602f60f5e
commit 569cfaaf49
@@ -22,9 +22,6 @@
 /* Defines parameters of the approximation and scalar fallback.  */
 #include "finite_pow.h"
 
-#define VecSmallExp v_u64 (SmallExp)
-#define VecThresExp v_u64 (ThresExp)
-
 #define VecSmallPowX v_u64 (SmallPowX)
 #define VecThresPowX v_u64 (ThresPowX)
 #define VecSmallPowY v_u64 (SmallPowY)
@@ -32,36 +29,48 @@
 
 static const struct data
 {
-  float64x2_t log_poly[6];
-  float64x2_t exp_poly[3];
-  float64x2_t ln2_hi, ln2_lo;
-  float64x2_t shift, inv_ln2_n, ln2_hi_n, ln2_lo_n, small_powx;
   uint64x2_t inf;
+  float64x2_t small_powx;
+  uint64x2_t offset, mask;
+  uint64x2_t mask_sub_0, mask_sub_1;
+  float64x2_t log_c0, log_c2, log_c4, log_c5;
+  double log_c1, log_c3;
+  double ln2_lo, ln2_hi;
+  uint64x2_t small_exp, thres_exp;
+  double ln2_lo_n, ln2_hi_n;
+  double inv_ln2_n, exp_c2;
+  float64x2_t exp_c0, exp_c1;
 } data = {
+  /* Power threshold.  */
+  .inf = V2 (0x7ff0000000000000),
+  .small_powx = V2 (0x1p-126),
+  .offset = V2 (Off),
+  .mask = V2 (0xfffULL << 52),
+  .mask_sub_0 = V2 (1ULL << 52),
+  .mask_sub_1 = V2 (52ULL << 52),
   /* Coefficients copied from v_pow_log_data.c
      relative error: 0x1.11922ap-70 in [-0x1.6bp-8, 0x1.6bp-8]
      Coefficients are scaled to match the scaling during evaluation.  */
-  .log_poly
-  = { V2 (0x1.555555555556p-2 * -2), V2 (-0x1.0000000000006p-2 * -2),
-      V2 (0x1.999999959554ep-3 * 4), V2 (-0x1.555555529a47ap-3 * 4),
-      V2 (0x1.2495b9b4845e9p-3 * -8), V2 (-0x1.0002b8b263fc3p-3 * -8) },
-  .ln2_hi = V2 (0x1.62e42fefa3800p-1),
-  .ln2_lo = V2 (0x1.ef35793c76730p-45),
+  .log_c0 = V2 (0x1.555555555556p-2 * -2),
+  .log_c1 = -0x1.0000000000006p-2 * -2,
+  .log_c2 = V2 (0x1.999999959554ep-3 * 4),
+  .log_c3 = -0x1.555555529a47ap-3 * 4,
+  .log_c4 = V2 (0x1.2495b9b4845e9p-3 * -8),
+  .log_c5 = V2 (-0x1.0002b8b263fc3p-3 * -8),
+  .ln2_hi = 0x1.62e42fefa3800p-1,
+  .ln2_lo = 0x1.ef35793c76730p-45,
   /* Polynomial coefficients: abs error: 1.43*2^-58, ulp error: 0.549
      (0.550 without fma) if |x| < ln2/512.  */
-  .exp_poly = { V2 (0x1.fffffffffffd4p-2), V2 (0x1.5555571d6ef9p-3),
-                V2 (0x1.5555576a5adcep-5) },
-  .shift = V2 (0x1.8p52), /* round to nearest int. without intrinsics.  */
-  .inv_ln2_n = V2 (0x1.71547652b82fep8), /* N/ln2.  */
-  .ln2_hi_n = V2 (0x1.62e42fefc0000p-9), /* ln2/N.  */
-  .ln2_lo_n = V2 (-0x1.c610ca86c3899p-45),
-  .small_powx = V2 (0x1p-126),
-  .inf = V2 (0x7ff0000000000000)
+  .exp_c0 = V2 (0x1.fffffffffffd4p-2),
+  .exp_c1 = V2 (0x1.5555571d6ef9p-3),
+  .exp_c2 = 0x1.5555576a5adcep-5,
+  .small_exp = V2 (0x3c90000000000000),
+  .thres_exp = V2 (0x03f0000000000000),
+  .inv_ln2_n = 0x1.71547652b82fep8, /* N/ln2.  */
+  .ln2_hi_n = 0x1.62e42fefc0000p-9, /* ln2/N.  */
+  .ln2_lo_n = -0x1.c610ca86c3899p-45,
 };
 
-#define A(i) data.log_poly[i]
-#define C(i) data.exp_poly[i]
-
 /* This version implements an algorithm close to scalar pow but
    - does not implement the trick in the exp's specialcase subroutine to avoid
      double-rounding,
@@ -91,10 +100,9 @@ v_log_inline (uint64x2_t ix, float64x2_t *tail, const struct data *d)
   /* x = 2^k z; where z is in range [OFF,2*OFF) and exact.
      The range is split into N subintervals.
      The ith subinterval contains z and c is near its center.  */
-  uint64x2_t tmp = vsubq_u64 (ix, v_u64 (Off));
-  int64x2_t k
-      = vshrq_n_s64 (vreinterpretq_s64_u64 (tmp), 52); /* arithmetic shift.  */
-  uint64x2_t iz = vsubq_u64 (ix, vandq_u64 (tmp, v_u64 (0xfffULL << 52)));
+  uint64x2_t tmp = vsubq_u64 (ix, d->offset);
+  int64x2_t k = vshrq_n_s64 (vreinterpretq_s64_u64 (tmp), 52);
+  uint64x2_t iz = vsubq_u64 (ix, vandq_u64 (tmp, d->mask));
   float64x2_t z = vreinterpretq_f64_u64 (iz);
   float64x2_t kd = vcvtq_f64_s64 (k);
   /* log(x) = k*Ln2 + log(c) + log1p(z/c-1).  */
@@ -105,9 +113,10 @@ v_log_inline (uint64x2_t ix, float64x2_t *tail, const struct data *d)
      |z/c - 1| < 1/N, so r = z/c - 1 is exactly representible.  */
   float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, invc);
   /* k*Ln2 + log(c) + r.  */
-  float64x2_t t1 = vfmaq_f64 (logc, kd, d->ln2_hi);
+  float64x2_t ln2 = vld1q_f64 (&d->ln2_lo);
+  float64x2_t t1 = vfmaq_laneq_f64 (logc, kd, ln2, 1);
   float64x2_t t2 = vaddq_f64 (t1, r);
-  float64x2_t lo1 = vfmaq_f64 (logctail, kd, d->ln2_lo);
+  float64x2_t lo1 = vfmaq_laneq_f64 (logctail, kd, ln2, 0);
   float64x2_t lo2 = vaddq_f64 (vsubq_f64 (t1, t2), r);
   /* Evaluation is optimized assuming superscalar pipelined execution.  */
   float64x2_t ar = vmulq_f64 (v_f64 (-0.5), r);
@@ -118,9 +127,10 @@
   float64x2_t lo3 = vfmaq_f64 (vnegq_f64 (ar2), ar, r);
   float64x2_t lo4 = vaddq_f64 (vsubq_f64 (t2, hi), ar2);
   /* p = log1p(r) - r - A[0]*r*r.  */
-  float64x2_t a56 = vfmaq_f64 (A (4), r, A (5));
-  float64x2_t a34 = vfmaq_f64 (A (2), r, A (3));
-  float64x2_t a12 = vfmaq_f64 (A (0), r, A (1));
+  float64x2_t odd_coeffs = vld1q_f64 (&d->log_c1);
+  float64x2_t a56 = vfmaq_f64 (d->log_c4, r, d->log_c5);
+  float64x2_t a34 = vfmaq_laneq_f64 (d->log_c2, r, odd_coeffs, 1);
+  float64x2_t a12 = vfmaq_laneq_f64 (d->log_c0, r, odd_coeffs, 0);
   float64x2_t p = vfmaq_f64 (a34, ar2, a56);
   p = vfmaq_f64 (a12, ar2, p);
   p = vmulq_f64 (ar3, p);
@@ -140,28 +150,28 @@ exp_special_case (float64x2_t x, float64x2_t xtail)
 
 /* Computes sign*exp(x+xtail) where |xtail| < 2^-8/N and |xtail| <= |x|.  */
 static inline float64x2_t
-v_exp_inline (float64x2_t x, float64x2_t xtail, const struct data *d)
+v_exp_inline (float64x2_t x, float64x2_t neg_xtail, const struct data *d)
 {
   /* Fallback to scalar exp_inline for all lanes if any lane
      contains value of x s.t. |x| <= 2^-54 or >= 512.  */
-  uint64x2_t abstop
-      = vshrq_n_u64 (vandq_u64 (vreinterpretq_u64_f64 (x), d->inf), 52);
-  uint64x2_t uoflowx
-      = vcgeq_u64 (vsubq_u64 (abstop, VecSmallExp), VecThresExp);
+  uint64x2_t uoflowx = vcgeq_u64 (
+      vsubq_u64 (vreinterpretq_u64_f64 (vabsq_f64 (x)), d->small_exp),
+      d->thres_exp);
   if (__glibc_unlikely (v_any_u64 (uoflowx)))
-    return exp_special_case (x, xtail);
+    return exp_special_case (x, vnegq_f64 (neg_xtail));
 
   /* exp(x) = 2^(k/N) * exp(r), with exp(r) in [2^(-1/2N),2^(1/2N)].  */
   /* x = ln2/N*k + r, with k integer and r in [-ln2/2N, ln2/2N].  */
-  float64x2_t z = vmulq_f64 (d->inv_ln2_n, x);
-  /* z - kd is in [-1, 1] in non-nearest rounding modes.  */
-  float64x2_t kd = vaddq_f64 (z, d->shift);
-  uint64x2_t ki = vreinterpretq_u64_f64 (kd);
-  kd = vsubq_f64 (kd, d->shift);
-  float64x2_t r = vfmsq_f64 (x, kd, d->ln2_hi_n);
-  r = vfmsq_f64 (r, kd, d->ln2_lo_n);
+  float64x2_t exp_consts = vld1q_f64 (&d->inv_ln2_n);
+  float64x2_t z = vmulq_laneq_f64 (x, exp_consts, 0);
+  float64x2_t kd = vrndnq_f64 (z);
+  uint64x2_t ki = vreinterpretq_u64_s64 (vcvtaq_s64_f64 (z));
+  float64x2_t ln2_n = vld1q_f64 (&d->ln2_lo_n);
+  float64x2_t r = vfmsq_laneq_f64 (x, kd, ln2_n, 1);
+  r = vfmsq_laneq_f64 (r, kd, ln2_n, 0);
   /* The code assumes 2^-200 < |xtail| < 2^-8/N.  */
-  r = vaddq_f64 (r, xtail);
+  r = vsubq_f64 (r, neg_xtail);
   /* 2^(k/N) ~= scale.  */
   uint64x2_t idx = vandq_u64 (ki, v_u64 (N_EXP - 1));
   uint64x2_t top = vshlq_n_u64 (ki, 52 - V_POW_EXP_TABLE_BITS);
@@ -170,8 +180,8 @@ v_exp_inline (float64x2_t x, float64x2_t xtail, const struct data *d)
   sbits = vaddq_u64 (sbits, top);
   /* exp(x) = 2^(k/N) * exp(r) ~= scale + scale * (exp(r) - 1).  */
   float64x2_t r2 = vmulq_f64 (r, r);
-  float64x2_t tmp = vfmaq_f64 (C (1), r, C (2));
-  tmp = vfmaq_f64 (C (0), r, tmp);
+  float64x2_t tmp = vfmaq_laneq_f64 (d->exp_c1, r, exp_consts, 1);
+  tmp = vfmaq_f64 (d->exp_c0, r, tmp);
   tmp = vfmaq_f64 (r, r2, tmp);
   float64x2_t scale = vreinterpretq_f64_u64 (sbits);
   /* Note: tmp == 0 or |tmp| > 2^-200 and scale > 2^-739, so there
@@ -230,8 +240,8 @@ float64x2_t VPCS_ATTR V_NAME_D2 (pow) (float64x2_t x, float64x2_t y)
 	{
 	  /* Normalize subnormal x so exponent becomes negative.  */
 	  uint64x2_t vix_norm = vreinterpretq_u64_f64 (
-	      vabsq_f64 (vmulq_f64 (x, vcvtq_f64_u64 (v_u64 (1ULL << 52)))));
-	  vix_norm = vsubq_u64 (vix_norm, v_u64 (52ULL << 52));
+	      vabsq_f64 (vmulq_f64 (x, vcvtq_f64_u64 (d->mask_sub_0))));
+	  vix_norm = vsubq_u64 (vix_norm, d->mask_sub_1);
 	  vix = vbslq_u64 (sub_x, vix_norm, vix);
 	}
     }
@@ -242,8 +252,7 @@ float64x2_t VPCS_ATTR V_NAME_D2 (pow) (float64x2_t x, float64x2_t y)
 
   /* Vector Exp(y_loghi, y_loglo).  */
   float64x2_t vehi = vmulq_f64 (y, vhi);
-  float64x2_t velo = vmulq_f64 (y, vlo);
   float64x2_t vemi = vfmsq_f64 (vehi, y, vhi);
-  velo = vsubq_f64 (velo, vemi);
-  return v_exp_inline (vehi, velo, d);
+  float64x2_t neg_velo = vfmsq_f64 (vemi, y, vlo);
+  return v_exp_inline (vehi, neg_velo, d);
 }
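Two of the smaller changes are worth spelling out. In v_exp_inline, the shift-aided round-to-nearest (add and subtract 0x1.8p52, reinterpreting the payload bits to recover the integer) is replaced by the dedicated rounding instructions FRINTN (vrndnq_f64) and FCVTAS (vcvtaq_s64_f64), dropping the shift constant and its load. And the last hunk forces FMA contraction by folding a separate multiply and subtract into a single vfmsq_f64, which is why the tail argument is now passed negated (neg_xtail). A standalone sketch contrasting the two rounding idioms, with illustrative function names that are not from glibc:

#include <arm_neon.h>
#include <stdio.h>

/* Old idiom: adding 0x1.8p52 forces the FPU to round z to an integer
   (valid for |z| < 2^51 in round-to-nearest mode); the low mantissa
   bits of the biased sum encode that integer.  */
static float64x2_t
round_via_shift (float64x2_t z, uint64x2_t *ki)
{
  const float64x2_t shift = vdupq_n_f64 (0x1.8p52);
  float64x2_t kd = vaddq_f64 (z, shift);
  *ki = vreinterpretq_u64_f64 (kd); /* low bits hold the integer.  */
  return vsubq_f64 (kd, shift);
}

/* New idiom: FRINTN rounds in the FP domain and FCVTAS converts with
   rounding, so no shift constant is needed.  (Tie-breaking differs:
   to even vs away from zero.)  */
static float64x2_t
round_via_rint (float64x2_t z, uint64x2_t *ki)
{
  *ki = vreinterpretq_u64_s64 (vcvtaq_s64_f64 (z));
  return vrndnq_f64 (z);
}

int
main (void)
{
  float64x2_t z = { 2.3, -7.6 };
  uint64x2_t ki_a, ki_b;
  float64x2_t a = round_via_shift (z, &ki_a);
  float64x2_t b = round_via_rint (z, &ki_b);
  printf ("rounded: %g %g vs %g %g\n", vgetq_lane_f64 (a, 0),
          vgetq_lane_f64 (a, 1), vgetq_lane_f64 (b, 0), vgetq_lane_f64 (b, 1));
  /* The low bits of ki, the only ones used for table index and
     exponent, also agree.  */
  printf ("ki & 0xff: %llu vs %llu\n",
          (unsigned long long) (vgetq_lane_u64 (ki_a, 1) & 0xff),
          (unsigned long long) (vgetq_lane_u64 (ki_b, 1) & 0xff));
  return 0;
}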