glibc/sysdeps/ieee754/flt-32/k_rem_pio2f.c

/* k_rem_pio2f.c -- float version of k_rem_pio2.c
* Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
*/
/*
* ====================================================
* Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
*
* Developed at SunPro, a Sun Microsystems, Inc. business.
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#if defined(LIBM_SCCS) && !defined(lint)
static char rcsid[] = "$NetBSD: k_rem_pio2f.c,v 1.4 1995/05/10 20:46:28 jtc Exp $";
#endif
#include <math.h>
#include <math_private.h>
#include <libc-diag.h>
/* In the float version, the input parameter x contains 8 bit
integers, not 24 bit integers. 113 bit precision is not supported. */
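/* A note adapted from the commentary in the double version,
   sysdeps/ieee754/dbl-64/k_rem_pio2.c: init_jk[prec]+1 is the
   initial number of ipio2[] terms used for the given precision,
   and PIo2[] below is pi/2 cut into small chunks (cf. the 24-bit
   chunks of the double version).  */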
static const int init_jk[] = {4,7,9}; /* initial value for jk */
static const float PIo2[] = {
1.5703125000e+00, /* 0x3fc90000 */
4.5776367188e-04, /* 0x39f00000 */
2.5987625122e-05, /* 0x37da0000 */
7.5437128544e-08, /* 0x33a20000 */
6.0026650317e-11, /* 0x2e840000 */
7.3896444519e-13, /* 0x2b500000 */
5.3845816694e-15, /* 0x27c20000 */
5.6378512969e-18, /* 0x22d00000 */
8.3009228831e-20, /* 0x1fc40000 */
3.2756352257e-22, /* 0x1bc60000 */
6.3331015649e-25, /* 0x17440000 */
};
static const float
zero = 0.0,
one = 1.0,
two8 = 2.5600000000e+02, /* 0x43800000 */
twon8 = 3.9062500000e-03; /* 0x3b800000 */
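/* Calling convention, adapted from the documentation of the double
   version in sysdeps/ieee754/dbl-64/k_rem_pio2.c (the float version
   uses 8-bit instead of 24-bit chunks):
     x[]    input broken into nx 8-bit integer chunks, each stored
            in a float; e0 is the binary exponent of x[0]
     y[]    output, y = x - n*pi/2, delivered in one to three floats
            according to prec
     nx     number of chunks in x[]
     prec   requested precision: 0 single, 1 double, 2 extended
            (3, quad, is not supported here)
     ipio2  8-bit integer chunks of 2/pi
   The return value is n mod 8 for y = x - n*pi/2.  */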
int __kernel_rem_pio2f(float *x, float *y, int e0, int nx, int prec, const int32_t *ipio2)
{
int32_t jz,jx,jv,jp,jk,carry,n,iq[20],i,j,k,m,q0,ih;
float z,fw,f[20],fq[20],q[20];
/* initialize jk*/
jk = init_jk[prec];
jp = jk;
/* determine jx,jv,q0, note that 3>q0 */
jx = nx-1;
jv = (e0-3)/8; if(jv<0) jv=0;
q0 = e0-8*(jv+1);
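/* jv indexes the first ipio2[] chunk needed; q0 is the binary
   exponent of q[0], so q[i] has exponent q0-8*i (adapted from the
   k_rem_pio2.c commentary).  */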
/* set up f[0] to f[jx+jk] where f[jx+jk] = ipio2[jv+jk] */
j = jv-jx; m = jx+jk;
for(i=0;i<=m;i++,j++) f[i] = (j<0)? zero : (float) ipio2[j];
/* compute q[0],q[1],...q[jk] */
for (i=0;i<=jk;i++) {
for(j=0,fw=0.0;j<=jx;j++)
fw += x[j]*f[jx+i-j];
q[i] = fw;
}
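/* Each q[i] is the chunkwise partial product of x[] with the
   selected chunks of 2/pi; their scaled sum approximates
   x*(2/pi).  */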
jz = jk;
recompute:
/* distill q[] into iq[] in reverse order */
for(i=0,j=jz,z=q[jz];j>0;i++,j--) {
fw = (float)((int32_t)(twon8* z));
iq[i] = (int32_t)(z-two8*fw);
z = q[j-1]+fw;
}
/* compute n */
z = __scalbnf(z,q0); /* actual value of z */
z -= (float)8.0*__floorf(z*(float)0.125); /* trim off integer >= 8 */
n = (int32_t) z;
z -= (float)n;
ih = 0;
if(q0>0) { /* need iq[jz-1] to determine n */
i = (iq[jz-1]>>(8-q0)); n += i;
iq[jz-1] -= i<<(8-q0);
ih = iq[jz-1]>>(7-q0);
}
else if(q0==0) ih = iq[jz-1]>>7;
else if(z>=(float)0.5) ih=2;
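/* ih is nonzero when the fractional part exceeds 1/2: n is then
   rounded up and the fraction replaced by 1-q below (ih == 2 marks
   the q0 < 0 case, where z itself carries the fraction).  */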
if(ih>0) { /* q > 0.5 */
n += 1; carry = 0;
for(i=0;i<jz ;i++) { /* compute 1-q */
j = iq[i];
if(carry==0) {
if(j!=0) {
carry = 1; iq[i] = 0x100- j;
}
} else iq[i] = 0xff - j;
}
if(q0>0) { /* rare case: chance is 1 in 12 */
switch(q0) {
case 1:
iq[jz-1] &= 0x7f; break;
case 2:
iq[jz-1] &= 0x3f; break;
}
}
if(ih==2) {
z = one - z;
if(carry!=0) z -= __scalbnf(one,q0);
}
}
/* check if recomputation is needed */
if(z==zero) {
j = 0;
for (i=jz-1;i>=jk;i--) j |= iq[i];
if(j==0) { /* need recomputation */
/* On s390x GCC 6.1 -O3 produces the warning "array subscript is
below array bounds [-Werror=array-bounds]".  Only
__ieee754_rem_pio2f calls __kernel_rem_pio2f, and only for
normal numbers with |x| ~> 2^7*(pi/2).  Thus x is nonzero,
ipio2 is nonzero, and not all iq[] values can be zero.  */
DIAG_PUSH_NEEDS_COMMENT;
DIAG_IGNORE_NEEDS_COMMENT (6.1, "-Warray-bounds");
for(k=1;iq[jk-k]==0;k++); /* k = no. of terms needed */
DIAG_POP_NEEDS_COMMENT;
for(i=jz+1;i<=jz+k;i++) { /* add q[jz+1] to q[jz+k] */
f[jx+i] = (float) ipio2[jv+i];
for(j=0,fw=0.0;j<=jx;j++) fw += x[j]*f[jx+i-j];
q[i] = fw;
}
jz += k;
goto recompute;
}
}
/* chop off zero terms */
if(z==(float)0.0) {
jz -= 1; q0 -= 8;
while(iq[jz]==0) { jz--; q0-=8;}
} else { /* break z into 8-bit if necessary */
z = __scalbnf(z,-q0);
if(z>=two8) {
fw = (float)((int32_t)(twon8*z));
iq[jz] = (int32_t)(z-two8*fw);
jz += 1; q0 += 8;
iq[jz] = (int32_t) fw;
} else iq[jz] = (int32_t) z ;
}
/* convert integer "bit" chunk to floating-point value */
fw = __scalbnf(one,q0);
for(i=jz;i>=0;i--) {
q[i] = fw*(float)iq[i]; fw*=twon8;
}
/* compute PIo2[0,...,jp]*q[jz,...,0] */
for(i=jz;i>=0;i--) {
for(fw=0.0,k=0;k<=jp&&k<=jz-i;k++) fw += PIo2[k]*q[i+k];
fq[jz-i] = fw;
}
/* compress fq[] into y[] */
switch(prec) {
case 0:
fw = 0.0;
for (i=jz;i>=0;i--) fw += fq[i];
y[0] = (ih==0)? fw: -fw;
break;
case 1:
case 2:;
float fv = 0.0;
for (i=jz;i>=0;i--) fv = math_narrow_eval (fv + fq[i]);
y[0] = (ih==0)? fv: -fv;
fv = math_narrow_eval (fq[0]-fv);
for (i=1;i<=jz;i++) fv = math_narrow_eval (fv + fq[i]);
y[1] = (ih==0)? fv: -fv;
break;
case 3: /* painful */
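/* Renormalize fq[] with Fast2Sum steps (fv is the rounded sum,
   fq[i] becomes the exact rounding error) so the terms do not
   overlap before forming the three result words.  */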
for (i=jz;i>0;i--) {
float fv = math_narrow_eval (fq[i-1]+fq[i]);
fq[i] += fq[i-1]-fv;
fq[i-1] = fv;
}
for (i=jz;i>1;i--) {
float fv = math_narrow_eval (fq[i-1]+fq[i]);
fq[i] += fq[i-1]-fv;
fq[i-1] = fv;
}
for (fw=0.0,i=jz;i>=2;i--) fw += fq[i];
if(ih==0) {
y[0] = fq[0]; y[1] = fq[1]; y[2] = fw;
} else {
y[0] = -fq[0]; y[1] = -fq[1]; y[2] = -fw;
}
}
return n&7;
}