[Commits] r22745 - in /fsf/trunk/libc: ./ include/ nptl/ stdlib/ sysdeps/ieee754/dbl-64/ sysdeps/powerpc/power4/fpu/
- To: commits@xxxxxxxxxx
- Subject: [Commits] r22745 - in /fsf/trunk/libc: ./ include/ nptl/ stdlib/ sysdeps/ieee754/dbl-64/ sysdeps/powerpc/power4/fpu/
- From: eglibc@xxxxxxxxxx
- Date: Sat, 30 Mar 2013 00:01:58 -0000
Author: eglibc
Date: Sat Mar 30 00:01:57 2013
New Revision: 22745
Log:
Import glibc-mainline for 2013-03-30
Modified:
fsf/trunk/libc/ChangeLog
fsf/trunk/libc/include/stdlib.h
fsf/trunk/libc/nptl/ChangeLog
fsf/trunk/libc/nptl/pthread_create.c
fsf/trunk/libc/stdlib/cxa_thread_atexit_impl.c
fsf/trunk/libc/stdlib/exit.c
fsf/trunk/libc/sysdeps/ieee754/dbl-64/e_atan2.c
fsf/trunk/libc/sysdeps/ieee754/dbl-64/e_log.c
fsf/trunk/libc/sysdeps/ieee754/dbl-64/mpa.h
fsf/trunk/libc/sysdeps/ieee754/dbl-64/mpatan.c
fsf/trunk/libc/sysdeps/ieee754/dbl-64/mpatan2.c
fsf/trunk/libc/sysdeps/ieee754/dbl-64/mpexp.c
fsf/trunk/libc/sysdeps/ieee754/dbl-64/mptan.c
fsf/trunk/libc/sysdeps/ieee754/dbl-64/s_atan.c
fsf/trunk/libc/sysdeps/ieee754/dbl-64/s_tan.c
fsf/trunk/libc/sysdeps/powerpc/power4/fpu/mpa-arch.h
fsf/trunk/libc/sysdeps/powerpc/power4/fpu/mpa.c
Modified: fsf/trunk/libc/ChangeLog
==============================================================================
--- fsf/trunk/libc/ChangeLog (original)
+++ fsf/trunk/libc/ChangeLog Sat Mar 30 00:01:57 2013
@@ -1,3 +1,40 @@
+2013-03-29 Siddhesh Poyarekar <siddhesh@xxxxxxxxxx>
+
+ * sysdeps/powerpc/power4/fpu/mpa-arch.h (INTEGER_OF): Replace
+ ONE with its value.
+
+ * sysdeps/ieee754/dbl-64/mpa.h (ONE, MONE): Remove defines.
+ (__pow_mp): Replace ONE and MONE with their values.
+ * sysdeps/ieee754/dbl-64/e_log.c (__ieee754_log): Likewise.
+ * sysdeps/ieee754/dbl-64/mpatan.c (__mpatan): Likewise.
+ * sysdeps/ieee754/dbl-64/mpatan2.c (__mpatan2): Likewise.
+ * sysdeps/ieee754/dbl-64/mptan.c (__mptan): Likewise.
+ * sysdeps/ieee754/dbl-64/s_atan.c (atan): Likewise.
+ * sysdeps/ieee754/dbl-64/s_tan.c (tan): Likewise.
+
+ * sysdeps/ieee754/dbl-64/s_tan.c: Fix formatting.
+
+ * sysdeps/ieee754/dbl-64/mpa.h (ZERO, MZERO): Remove defines.
+ (__pow_mp): Replace ZERO and MZERO with their values.
+ * sysdeps/ieee754/dbl-64/e_atan2.c (__ieee754_atan2): Likewise.
+ * sysdeps/ieee754/dbl-64/e_log.c (__ieee754_log): Likewise.
+ * sysdeps/ieee754/dbl-64/mpatan2.c (__mpatan2): Likewise.
+ * sysdeps/ieee754/dbl-64/mpexp.c (__mpexp): Likewise.
+ * sysdeps/ieee754/dbl-64/s_atan.c (atan): Likewise.
+ * sysdeps/powerpc/power4/fpu/mpa.c (__mul): Likewise.
+ (__sqr): Likewise.
+
+ * sysdeps/ieee754/dbl-64/s_atan.c: Fix formatting.
+
+ * sysdeps/ieee754/dbl-64/e_log.c: Fix formatting.
+
+2013-03-28 Roland McGrath <roland@xxxxxxxxxxxxx>
+
+ * include/stdlib.h [!SHARED] (__call_tls_dtors):
+ Declare with __attribute__ ((weak)).
+ * stdlib/exit.c (__libc_atexit) [!SHARED]:
+ Call __call_tls_dtors only if it's not NULL.
+
2013-03-28 Roland McGrath <roland@xxxxxxxxxxxxx>
* csu/libc-start.c (__libc_start_main) [!SHARED]: If _dl_aux_init
Modified: fsf/trunk/libc/include/stdlib.h
==============================================================================
--- fsf/trunk/libc/include/stdlib.h (original)
+++ fsf/trunk/libc/include/stdlib.h Sat Mar 30 00:01:57 2013
@@ -102,8 +102,12 @@
extern int __cxa_thread_atexit_impl (void (*func) (void *), void *arg,
void *d);
-extern void __call_tls_dtors (void);
-libc_hidden_proto (__call_tls_dtors);
+extern void __call_tls_dtors (void)
+#ifndef SHARED
+ __attribute__ ((weak))
+#endif
+ ;
+libc_hidden_proto (__call_tls_dtors)
extern void __cxa_finalize (void *d);
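
The stdlib.h hunk above makes __call_tls_dtors a weak reference in static (!SHARED) builds, so the link still succeeds when cxa_thread_atexit_impl.o is not pulled in; the symbol's address then resolves to NULL, and callers test that address before calling, as the pthread_create.c and exit.c hunks below do. A minimal standalone sketch of the pattern (hypothetical names, GNU toolchain assumed, not the glibc sources themselves):

  /* weak-ref-sketch.c -- the weak-reference guard used in this commit.
     cleanup_hook stands in for __call_tls_dtors.  */
  #include <stdio.h>

  /* Weak declaration: if no object file in the link defines cleanup_hook,
     its address resolves to NULL instead of causing a link error.  */
  extern void cleanup_hook (void) __attribute__ ((weak));

  int
  main (void)
  {
    /* Guard on the address, exactly like the !SHARED paths below.  */
    if (&cleanup_hook != NULL)
      cleanup_hook ();
    else
      puts ("cleanup_hook not linked in; skipping");
    return 0;
  }

Linked without a definition of cleanup_hook this prints the fallback message; providing a definition in another object file makes the call go through, which is the behaviour the commit wants for statically linked programs that never reference __cxa_thread_atexit_impl.
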
Modified: fsf/trunk/libc/nptl/ChangeLog
==============================================================================
--- fsf/trunk/libc/nptl/ChangeLog (original)
+++ fsf/trunk/libc/nptl/ChangeLog Sat Mar 30 00:01:57 2013
@@ -1,3 +1,8 @@
+2013-03-28 Roland McGrath <roland@xxxxxxxxxxxxx>
+
+ * pthread_create.c (start_thread) [!SHARED]:
+ Call __call_tls_dtors only if it's not NULL.
+
2013-03-19 Siddhesh Poyarekar <siddhesh@xxxxxxxxxx>
* allocatestack.c (allocate_stack): Use __default_pthread_attr
Modified: fsf/trunk/libc/nptl/pthread_create.c
==============================================================================
--- fsf/trunk/libc/nptl/pthread_create.c (original)
+++ fsf/trunk/libc/nptl/pthread_create.c Sat Mar 30 00:01:57 2013
@@ -312,7 +312,10 @@
}
/* Call destructors for the thread_local TLS variables. */
- __call_tls_dtors ();
+#ifndef SHARED
+ if (&__call_tls_dtors != NULL)
+#endif
+ __call_tls_dtors ();
/* Run the destructor for the thread-local data. */
__nptl_deallocate_tsd ();
Modified: fsf/trunk/libc/stdlib/cxa_thread_atexit_impl.c
==============================================================================
--- fsf/trunk/libc/stdlib/cxa_thread_atexit_impl.c (original)
+++ fsf/trunk/libc/stdlib/cxa_thread_atexit_impl.c Sat Mar 30 00:01:57 2013
@@ -76,7 +76,7 @@
}
/* Call the destructors. This is called either when a thread returns from the
- initial function or when the process exits via the exit(3) function. */
+ initial function or when the process exits via the exit function. */
void
__call_tls_dtors (void)
{
Modified: fsf/trunk/libc/stdlib/exit.c
==============================================================================
--- fsf/trunk/libc/stdlib/exit.c (original)
+++ fsf/trunk/libc/stdlib/exit.c Sat Mar 30 00:01:57 2013
@@ -34,7 +34,10 @@
bool run_list_atexit)
{
/* First, call the TLS destructors. */
- __call_tls_dtors ();
+#ifndef SHARED
+ if (&__call_tls_dtors != NULL)
+#endif
+ __call_tls_dtors ();
/* We do it this way to handle recursive calls to exit () made by
the functions registered with `atexit' and `on_exit'. We call
Modified: fsf/trunk/libc/sysdeps/ieee754/dbl-64/e_atan2.c
==============================================================================
--- fsf/trunk/libc/sysdeps/ieee754/dbl-64/e_atan2.c (original)
+++ fsf/trunk/libc/sysdeps/ieee754/dbl-64/e_atan2.c Sat Mar 30 00:01:57 2013
@@ -104,7 +104,7 @@
if (dy == 0x00000000)
{
if ((ux & 0x80000000) == 0x00000000)
- return ZERO;
+ return 0;
else
return opi.d;
}
@@ -114,14 +114,14 @@
if (dy == 0x00000000)
{
if ((ux & 0x80000000) == 0x00000000)
- return MZERO;
+ return -0.0;
else
return mopi.d;
}
}
/* x=+-0 */
- if (x == ZERO)
+ if (x == 0)
{
if ((uy & 0x80000000) == 0x00000000)
return hpi.d;
@@ -147,9 +147,9 @@
else
{
if ((uy & 0x80000000) == 0x00000000)
- return ZERO;
+ return 0;
else
- return MZERO;
+ return -0.0;
}
}
}
@@ -190,16 +190,16 @@
}
/* either x/y or y/x is very close to zero */
- ax = (x < ZERO) ? -x : x;
- ay = (y < ZERO) ? -y : y;
+ ax = (x < 0) ? -x : x;
+ ay = (y < 0) ? -y : y;
de = (uy & 0x7ff00000) - (ux & 0x7ff00000);
if (de >= ep)
{
- return ((y > ZERO) ? hpi.d : mhpi.d);
+ return ((y > 0) ? hpi.d : mhpi.d);
}
else if (de <= em)
{
- if (x > ZERO)
+ if (x > 0)
{
if ((z = ay / ax) < TWOM1022)
return normalized (ax, ay, y, z);
@@ -208,7 +208,7 @@
}
else
{
- return ((y > ZERO) ? opi.d : mopi.d);
+ return ((y > 0) ? opi.d : mopi.d);
}
}
@@ -240,7 +240,7 @@
du = ((ax - v) - vv) / ay;
}
- if (x > ZERO)
+ if (x > 0)
{
/* (i) x>0, abs(y)< abs(x): atan(ay/ax) */
if (ay < ax)
@@ -262,7 +262,7 @@
MUL2 (u, du, u, du, v, vv, t1, t2, t3, t4, t5, t6, t7, t8);
s1 = v * (f11.d + v * (f13.d
+ v * (f15.d + v * (f17.d + v * f19.d))));
- ADD2 (f9.d, ff9.d, s1, ZERO, s2, ss2, t1, t2);
+ ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
@@ -314,7 +314,7 @@
+ v * (hij[i][13].d
+ v * (hij[i][14].d
+ v * hij[i][15].d))));
- ADD2 (hij[i][9].d, hij[i][10].d, s1, ZERO, s2, ss2, t1, t2);
+ ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
@@ -348,7 +348,7 @@
s1 = v * (f11.d
+ v * (f13.d
+ v * (f15.d + v * (f17.d + v * f19.d))));
- ADD2 (f9.d, ff9.d, s1, ZERO, s2, ss2, t1, t2);
+ ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
@@ -391,7 +391,7 @@
+ v * (hij[i][14].d
+ v * hij[i][15].d))));
- ADD2 (hij[i][9].d, hij[i][10].d, s1, ZERO, s2, ss2, t1, t2);
+ ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
@@ -426,7 +426,7 @@
MUL2 (u, du, u, du, v, vv, t1, t2, t3, t4, t5, t6, t7, t8);
s1 = v * (f11.d
+ v * (f13.d + v * (f15.d + v * (f17.d + v * f19.d))));
- ADD2 (f9.d, ff9.d, s1, ZERO, s2, ss2, t1, t2);
+ ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
@@ -466,7 +466,7 @@
+ v * (hij[i][13].d
+ v * (hij[i][14].d
+ v * hij[i][15].d))));
- ADD2 (hij[i][9].d, hij[i][10].d, s1, ZERO, s2, ss2, t1, t2);
+ ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
@@ -497,7 +497,7 @@
MUL2 (u, du, u, du, v, vv, t1, t2, t3, t4, t5, t6, t7, t8);
s1 = v * (f11.d + v * (f13.d + v * (f15.d + v * (f17.d + v * f19.d))));
- ADD2 (f9.d, ff9.d, s1, ZERO, s2, ss2, t1, t2);
+ ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
@@ -538,7 +538,7 @@
+ v * (hij[i][13].d
+ v * (hij[i][14].d + v * hij[i][15].d))));
- ADD2 (hij[i][9].d, hij[i][10].d, s1, ZERO, s2, ss2, t1, t2);
+ ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
Modified: fsf/trunk/libc/sysdeps/ieee754/dbl-64/e_log.c
==============================================================================
--- fsf/trunk/libc/sysdeps/ieee754/dbl-64/e_log.c (original)
+++ fsf/trunk/libc/sysdeps/ieee754/dbl-64/e_log.c Sat Mar 30 00:01:57 2013
@@ -44,7 +44,7 @@
# define SECTION
#endif
-void __mplog(mp_no *, mp_no *, int);
+void __mplog (mp_no *, mp_no *, int);
/*********************************************************************/
/* An ultimate log routine. Given an IEEE double machine number x */
@@ -52,163 +52,201 @@
/*********************************************************************/
double
SECTION
-__ieee754_log(double x) {
+__ieee754_log (double x)
+{
#define M 4
- static const int pr[M]={8,10,18,32};
- int i,j,n,ux,dx,p;
- double dbl_n,u,p0,q,r0,w,nln2a,luai,lubi,lvaj,lvbj,
- sij,ssij,ttij,A,B,B0,y,y1,y2,polI,polII,sa,sb,
- t1,t2,t7,t8,t,ra,rb,ww,
- a0,aa0,s1,s2,ss2,s3,ss3,a1,aa1,a,aa,b,bb,c;
+ static const int pr[M] = {8, 10, 18, 32};
+ int i, j, n, ux, dx, p;
+ double dbl_n, u, p0, q, r0, w, nln2a, luai, lubi, lvaj, lvbj,
+ sij, ssij, ttij, A, B, B0, y, y1, y2, polI, polII, sa, sb,
+ t1, t2, t7, t8, t, ra, rb, ww,
+ a0, aa0, s1, s2, ss2, s3, ss3, a1, aa1, a, aa, b, bb, c;
#ifndef DLA_FMS
- double t3,t4,t5,t6;
+ double t3, t4, t5, t6;
#endif
number num;
- mp_no mpx,mpy,mpy1,mpy2,mperr;
+ mp_no mpx, mpy, mpy1, mpy2, mperr;
#include "ulog.tbl"
#include "ulog.h"
/* Treating special values of x ( x<=0, x=INF, x=NaN etc.). */
- num.d = x; ux = num.i[HIGH_HALF]; dx = num.i[LOW_HALF];
- n=0;
- if (__builtin_expect(ux < 0x00100000, 0)) {
- if (__builtin_expect(((ux & 0x7fffffff) | dx) == 0, 0))
- return MHALF/ZERO; /* return -INF */
- if (__builtin_expect(ux < 0, 0))
- return (x-x)/ZERO; /* return NaN */
- n -= 54; x *= two54.d; /* scale x */
- num.d = x;
- }
- if (__builtin_expect(ux >= 0x7ff00000, 0))
- return x+x; /* INF or NaN */
+ num.d = x;
+ ux = num.i[HIGH_HALF];
+ dx = num.i[LOW_HALF];
+ n = 0;
+ if (__builtin_expect (ux < 0x00100000, 0))
+ {
+ if (__builtin_expect (((ux & 0x7fffffff) | dx) == 0, 0))
+ return MHALF / 0.0; /* return -INF */
+ if (__builtin_expect (ux < 0, 0))
+ return (x - x) / 0.0; /* return NaN */
+ n -= 54;
+ x *= two54.d; /* scale x */
+ num.d = x;
+ }
+ if (__builtin_expect (ux >= 0x7ff00000, 0))
+ return x + x; /* INF or NaN */
/* Regular values of x */
- w = x-ONE;
- if (__builtin_expect(ABS(w) > U03, 1)) { goto case_03; }
-
+ w = x - 1;
+ if (__builtin_expect (ABS (w) > U03, 1))
+ goto case_03;
/*--- Stage I, the case abs(x-1) < 0.03 */
- t8 = MHALF*w;
- EMULV(t8,w,a,aa,t1,t2,t3,t4,t5)
- EADD(w,a,b,bb)
-
+ t8 = MHALF * w;
+ EMULV (t8, w, a, aa, t1, t2, t3, t4, t5);
+ EADD (w, a, b, bb);
/* Evaluate polynomial II */
- polII = (b0.d+w*(b1.d+w*(b2.d+w*(b3.d+w*(b4.d+
- w*(b5.d+w*(b6.d+w*(b7.d+w*b8.d))))))))*w*w*w;
- c = (aa+bb)+polII;
+ polII = b7.d + w * b8.d;
+ polII = b6.d + w * polII;
+ polII = b5.d + w * polII;
+ polII = b4.d + w * polII;
+ polII = b3.d + w * polII;
+ polII = b2.d + w * polII;
+ polII = b1.d + w * polII;
+ polII = b0.d + w * polII;
+ polII *= w * w * w;
+ c = (aa + bb) + polII;
/* End stage I, case abs(x-1) < 0.03 */
- if ((y=b+(c+b*E2)) == b+(c-b*E2)) return y;
+ if ((y = b + (c + b * E2)) == b + (c - b * E2))
+ return y;
/*--- Stage II, the case abs(x-1) < 0.03 */
- a = d11.d+w*(d12.d+w*(d13.d+w*(d14.d+w*(d15.d+w*(d16.d+
- w*(d17.d+w*(d18.d+w*(d19.d+w*d20.d))))))));
- EMULV(w,a,s2,ss2,t1,t2,t3,t4,t5)
- ADD2(d10.d,dd10.d,s2,ss2,s3,ss3,t1,t2)
- MUL2(w,ZERO,s3,ss3,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(d9.d,dd9.d,s2,ss2,s3,ss3,t1,t2)
- MUL2(w,ZERO,s3,ss3,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(d8.d,dd8.d,s2,ss2,s3,ss3,t1,t2)
- MUL2(w,ZERO,s3,ss3,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(d7.d,dd7.d,s2,ss2,s3,ss3,t1,t2)
- MUL2(w,ZERO,s3,ss3,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(d6.d,dd6.d,s2,ss2,s3,ss3,t1,t2)
- MUL2(w,ZERO,s3,ss3,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(d5.d,dd5.d,s2,ss2,s3,ss3,t1,t2)
- MUL2(w,ZERO,s3,ss3,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(d4.d,dd4.d,s2,ss2,s3,ss3,t1,t2)
- MUL2(w,ZERO,s3,ss3,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(d3.d,dd3.d,s2,ss2,s3,ss3,t1,t2)
- MUL2(w,ZERO,s3,ss3,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(d2.d,dd2.d,s2,ss2,s3,ss3,t1,t2)
- MUL2(w,ZERO,s3,ss3,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
- MUL2(w,ZERO,s2,ss2,s3,ss3,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(w,ZERO, s3,ss3, b, bb,t1,t2)
+ a = d19.d + w * d20.d;
+ a = d18.d + w * a;
+ a = d17.d + w * a;
+ a = d16.d + w * a;
+ a = d15.d + w * a;
+ a = d14.d + w * a;
+ a = d13.d + w * a;
+ a = d12.d + w * a;
+ a = d11.d + w * a;
+
+ EMULV (w, a, s2, ss2, t1, t2, t3, t4, t5);
+ ADD2 (d10.d, dd10.d, s2, ss2, s3, ss3, t1, t2);
+ MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (d9.d, dd9.d, s2, ss2, s3, ss3, t1, t2);
+ MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (d8.d, dd8.d, s2, ss2, s3, ss3, t1, t2);
+ MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (d7.d, dd7.d, s2, ss2, s3, ss3, t1, t2);
+ MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (d6.d, dd6.d, s2, ss2, s3, ss3, t1, t2);
+ MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (d5.d, dd5.d, s2, ss2, s3, ss3, t1, t2);
+ MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (d4.d, dd4.d, s2, ss2, s3, ss3, t1, t2);
+ MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (d3.d, dd3.d, s2, ss2, s3, ss3, t1, t2);
+ MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (d2.d, dd2.d, s2, ss2, s3, ss3, t1, t2);
+ MUL2 (w, 0, s3, ss3, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (w, 0, s2, ss2, s3, ss3, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (w, 0, s3, ss3, b, bb, t1, t2);
/* End stage II, case abs(x-1) < 0.03 */
- if ((y=b+(bb+b*E4)) == b+(bb-b*E4)) return y;
+ if ((y = b + (bb + b * E4)) == b + (bb - b * E4))
+ return y;
goto stage_n;
/*--- Stage I, the case abs(x-1) > 0.03 */
- case_03:
+case_03:
/* Find n,u such that x = u*2**n, 1/sqrt(2) < u < sqrt(2) */
n += (num.i[HIGH_HALF] >> 20) - 1023;
num.i[HIGH_HALF] = (num.i[HIGH_HALF] & 0x000fffff) | 0x3ff00000;
- if (num.d > SQRT_2) { num.d *= HALF; n++; }
- u = num.d; dbl_n = (double) n;
+ if (num.d > SQRT_2)
+ {
+ num.d *= HALF;
+ n++;
+ }
+ u = num.d;
+ dbl_n = (double) n;
/* Find i such that ui=1+(i-75)/2**8 is closest to u (i= 0,1,2,...,181) */
num.d += h1.d;
i = (num.i[HIGH_HALF] & 0x000fffff) >> 12;
/* Find j such that vj=1+(j-180)/2**16 is closest to v=u/ui (j= 0,...,361) */
- num.d = u*Iu[i].d + h2.d;
+ num.d = u * Iu[i].d + h2.d;
j = (num.i[HIGH_HALF] & 0x000fffff) >> 4;
/* Compute w=(u-ui*vj)/(ui*vj) */
- p0=(ONE+(i-75)*DEL_U)*(ONE+(j-180)*DEL_V);
- q=u-p0; r0=Iu[i].d*Iv[j].d; w=q*r0;
+ p0 = (1 + (i - 75) * DEL_U) * (1 + (j - 180) * DEL_V);
+ q = u - p0;
+ r0 = Iu[i].d * Iv[j].d;
+ w = q * r0;
/* Evaluate polynomial I */
- polI = w+(a2.d+a3.d*w)*w*w;
+ polI = w + (a2.d + a3.d * w) * w * w;
/* Add up everything */
- nln2a = dbl_n*LN2A;
- luai = Lu[i][0].d; lubi = Lu[i][1].d;
- lvaj = Lv[j][0].d; lvbj = Lv[j][1].d;
- EADD(luai,lvaj,sij,ssij)
- EADD(nln2a,sij,A ,ttij)
- B0 = (((lubi+lvbj)+ssij)+ttij)+dbl_n*LN2B;
- B = polI+B0;
+ nln2a = dbl_n * LN2A;
+ luai = Lu[i][0].d;
+ lubi = Lu[i][1].d;
+ lvaj = Lv[j][0].d;
+ lvbj = Lv[j][1].d;
+ EADD (luai, lvaj, sij, ssij);
+ EADD (nln2a, sij, A, ttij);
+ B0 = (((lubi + lvbj) + ssij) + ttij) + dbl_n * LN2B;
+ B = polI + B0;
/* End stage I, case abs(x-1) >= 0.03 */
- if ((y=A+(B+E1)) == A+(B-E1)) return y;
+ if ((y = A + (B + E1)) == A + (B - E1))
+ return y;
/*--- Stage II, the case abs(x-1) > 0.03 */
/* Improve the accuracy of r0 */
- EMULV(p0,r0,sa,sb,t1,t2,t3,t4,t5)
- t=r0*((ONE-sa)-sb);
- EADD(r0,t,ra,rb)
+ EMULV (p0, r0, sa, sb, t1, t2, t3, t4, t5);
+ t = r0 * ((1 - sa) - sb);
+ EADD (r0, t, ra, rb);
/* Compute w */
- MUL2(q,ZERO,ra,rb,w,ww,t1,t2,t3,t4,t5,t6,t7,t8)
-
- EADD(A,B0,a0,aa0)
+ MUL2 (q, 0, ra, rb, w, ww, t1, t2, t3, t4, t5, t6, t7, t8);
+
+ EADD (A, B0, a0, aa0);
/* Evaluate polynomial III */
- s1 = (c3.d+(c4.d+c5.d*w)*w)*w;
- EADD(c2.d,s1,s2,ss2)
- MUL2(s2,ss2,w,ww,s3,ss3,t1,t2,t3,t4,t5,t6,t7,t8)
- MUL2(s3,ss3,w,ww,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(s2,ss2,w,ww,s3,ss3,t1,t2)
- ADD2(s3,ss3,a0,aa0,a1,aa1,t1,t2)
+ s1 = (c3.d + (c4.d + c5.d * w) * w) * w;
+ EADD (c2.d, s1, s2, ss2);
+ MUL2 (s2, ss2, w, ww, s3, ss3, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (s3, ss3, w, ww, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (s2, ss2, w, ww, s3, ss3, t1, t2);
+ ADD2 (s3, ss3, a0, aa0, a1, aa1, t1, t2);
/* End stage II, case abs(x-1) >= 0.03 */
- if ((y=a1+(aa1+E3)) == a1+(aa1-E3)) return y;
+ if ((y = a1 + (aa1 + E3)) == a1 + (aa1 - E3))
+ return y;
/* Final stages. Use multi-precision arithmetic. */
- stage_n:
-
- for (i=0; i<M; i++) {
- p = pr[i];
- __dbl_mp(x,&mpx,p); __dbl_mp(y,&mpy,p);
- __mplog(&mpx,&mpy,p);
- __dbl_mp(e[i].d,&mperr,p);
- __add(&mpy,&mperr,&mpy1,p); __sub(&mpy,&mperr,&mpy2,p);
- __mp_dbl(&mpy1,&y1,p); __mp_dbl(&mpy2,&y2,p);
- if (y1==y2) return y1;
- }
+stage_n:
+
+ for (i = 0; i < M; i++)
+ {
+ p = pr[i];
+ __dbl_mp (x, &mpx, p);
+ __dbl_mp (y, &mpy, p);
+ __mplog (&mpx, &mpy, p);
+ __dbl_mp (e[i].d, &mperr, p);
+ __add (&mpy, &mperr, &mpy1, p);
+ __sub (&mpy, &mperr, &mpy2, p);
+ __mp_dbl (&mpy1, &y1, p);
+ __mp_dbl (&mpy2, &y2, p);
+ if (y1 == y2)
+ return y1;
+ }
return y1;
}
+
#ifndef __ieee754_log
strong_alias (__ieee754_log, __log_finite)
#endif
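
The reformatted e_log.c above (and s_atan.c and s_tan.c further down) replaces each long nested polynomial expression with a sequence of Horner steps evaluated from the highest-order coefficient downward; the recurrence itself is unchanged, only the layout is. A small sketch of the two layouts, with made-up coefficients:

  /* horner-sketch.c -- nested vs. step-by-step Horner evaluation.  */
  #include <stdio.h>

  int
  main (void)
  {
    double x = 0.03, c0 = 1.0, c1 = 0.5, c2 = 0.25, c3 = 0.125;

    /* Old layout: one nested expression.  */
    double nested = c0 + x * (c1 + x * (c2 + x * c3));

    /* New layout: same multiplies and adds, written one step at a time.  */
    double p = c2 + x * c3;
    p = c1 + x * p;
    p = c0 + x * p;

    printf ("%d\n", nested == p);  /* prints 1 */
    return 0;
  }

For the pure Horner part the two layouts perform the same operations in the same order, so the comparison prints 1.
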
Modified: fsf/trunk/libc/sysdeps/ieee754/dbl-64/mpa.h
==============================================================================
--- fsf/trunk/libc/sysdeps/ieee754/dbl-64/mpa.h (original)
+++ fsf/trunk/libc/sysdeps/ieee754/dbl-64/mpa.h Sat Mar 30 00:01:57 2013
@@ -91,10 +91,6 @@
# define TWO52 0x1.0p52 /* 2^52 */
#endif
-#define ZERO 0.0 /* 0 */
-#define MZERO -0.0 /* 0 with the sign bit set */
-#define ONE 1.0 /* 1 */
-#define MONE -1.0 /* -1 */
#define TWO 2.0 /* 2 */
#define TWO5 TWOPOW (5) /* 2^5 */
@@ -152,10 +148,10 @@
rem += 24;
}
/* The sign of any 2^x is always positive. */
- Y[0] = ONE;
+ Y[0] = 1;
Y[1] = 1 << rem;
- /* Everything else is ZERO. */
+ /* Everything else is 0. */
for (i = 2; i <= p; i++)
- Y[i] = ZERO;
+ Y[i] = 0;
}
Modified: fsf/trunk/libc/sysdeps/ieee754/dbl-64/mpatan.c
==============================================================================
--- fsf/trunk/libc/sysdeps/ieee754/dbl-64/mpatan.c (original)
+++ fsf/trunk/libc/sysdeps/ieee754/dbl-64/mpatan.c Sat Mar 30 00:01:57 2013
@@ -74,7 +74,7 @@
}
}
mptwoim1.e = 1;
- mptwoim1.d[0] = ONE;
+ mptwoim1.d[0] = 1;
/* Reduce x m times. */
__sqr (x, &mpsm, p);
Modified: fsf/trunk/libc/sysdeps/ieee754/dbl-64/mpatan2.c
==============================================================================
--- fsf/trunk/libc/sysdeps/ieee754/dbl-64/mpatan2.c (original)
+++ fsf/trunk/libc/sysdeps/ieee754/dbl-64/mpatan2.c Sat Mar 30 00:01:57 2013
@@ -46,12 +46,12 @@
{
mp_no mpt1, mpt2, mpt3;
- if (X[0] <= ZERO)
+ if (X[0] <= 0)
{
__dvd (x, y, &mpt1, p);
__mul (&mpt1, &mpt1, &mpt2, p);
- if (mpt1.d[0] != ZERO)
- mpt1.d[0] = ONE;
+ if (mpt1.d[0] != 0)
+ mpt1.d[0] = 1;
__add (&mpt2, &mpone, &mpt3, p);
__mpsqrt (&mpt3, &mpt2, p);
__add (&mpt1, &mpt2, &mpt3, p);
Modified: fsf/trunk/libc/sysdeps/ieee754/dbl-64/mpexp.c
==============================================================================
--- fsf/trunk/libc/sysdeps/ieee754/dbl-64/mpexp.c (original)
+++ fsf/trunk/libc/sysdeps/ieee754/dbl-64/mpexp.c Sat Mar 30 00:01:57 2013
@@ -85,7 +85,7 @@
{
for (i = 2; i <= p; i++)
{
- if (X[i] != ZERO)
+ if (X[i] != 0)
break;
}
if (i == p + 1)
Modified: fsf/trunk/libc/sysdeps/ieee754/dbl-64/mptan.c
==============================================================================
--- fsf/trunk/libc/sysdeps/ieee754/dbl-64/mptan.c (original)
+++ fsf/trunk/libc/sysdeps/ieee754/dbl-64/mptan.c Sat Mar 30 00:01:57 2013
@@ -56,7 +56,7 @@
if (n)
{
__dvd (&mpc, &mps, mpy, p);
- mpy->d[0] *= MONE;
+ mpy->d[0] *= -1;
}
/* tan is negative in this area. */
else
Modified: fsf/trunk/libc/sysdeps/ieee754/dbl-64/s_atan.c
==============================================================================
--- fsf/trunk/libc/sysdeps/ieee754/dbl-64/s_atan.c (original)
+++ fsf/trunk/libc/sysdeps/ieee754/dbl-64/s_atan.c Sat Mar 30 00:01:57 2013
@@ -43,177 +43,272 @@
#include "atnat.h"
#include <math.h>
-void __mpatan(mp_no *,mp_no *,int); /* see definition in mpatan.c */
-static double atanMp(double,const int[]);
+void __mpatan (mp_no *, mp_no *, int); /* see definition in mpatan.c */
+static double atanMp (double, const int[]);
/* Fix the sign of y and return */
-static double __signArctan(double x,double y){
- return __copysign(y, x);
+static double
+__signArctan (double x, double y)
+{
+ return __copysign (y, x);
}
/* An ultimate atan() routine. Given an IEEE double machine number x, */
/* routine computes the correctly rounded (to nearest) value of atan(x). */
-double atan(double x) {
-
-
- double cor,s1,ss1,s2,ss2,t1,t2,t3,t7,t8,t9,t10,u,u2,u3,
- v,vv,w,ww,y,yy,z,zz;
+double
+atan (double x)
+{
+ double cor, s1, ss1, s2, ss2, t1, t2, t3, t7, t8, t9, t10, u, u2, u3,
+ v, vv, w, ww, y, yy, z, zz;
#ifndef DLA_FMS
- double t4,t5,t6;
+ double t4, t5, t6;
#endif
- int i,ux,dx;
- static const int pr[M]={6,8,10,32};
+ int i, ux, dx;
+ static const int pr[M] = { 6, 8, 10, 32 };
number num;
- num.d = x; ux = num.i[HIGH_HALF]; dx = num.i[LOW_HALF];
+ num.d = x;
+ ux = num.i[HIGH_HALF];
+ dx = num.i[LOW_HALF];
/* x=NaN */
- if (((ux&0x7ff00000)==0x7ff00000) && (((ux&0x000fffff)|dx)!=0x00000000))
- return x+x;
+ if (((ux & 0x7ff00000) == 0x7ff00000)
+ && (((ux & 0x000fffff) | dx) != 0x00000000))
+ return x + x;
/* Regular values of x, including denormals +-0 and +-INF */
- u = (x<ZERO) ? -x : x;
- if (u<C) {
- if (u<B) {
- if (u<A) { /* u < A */
- return x; }
- else { /* A <= u < B */
- v=x*x; yy=x*v*(d3.d+v*(d5.d+v*(d7.d+v*(d9.d+v*(d11.d+v*d13.d)))));
- if ((y=x+(yy-U1*x)) == x+(yy+U1*x)) return y;
-
- EMULV(x,x,v,vv,t1,t2,t3,t4,t5) /* v+vv=x^2 */
- s1=v*(f11.d+v*(f13.d+v*(f15.d+v*(f17.d+v*f19.d))));
- ADD2(f9.d,ff9.d,s1,ZERO,s2,ss2,t1,t2)
- MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(f7.d,ff7.d,s1,ss1,s2,ss2,t1,t2)
- MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(f5.d,ff5.d,s1,ss1,s2,ss2,t1,t2)
- MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(f3.d,ff3.d,s1,ss1,s2,ss2,t1,t2)
- MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
- MUL2(x,ZERO,s1,ss1,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(x,ZERO,s2,ss2,s1,ss1,t1,t2)
- if ((y=s1+(ss1-U5*s1)) == s1+(ss1+U5*s1)) return y;
-
- return atanMp(x,pr);
- } }
- else { /* B <= u < C */
- i=(TWO52+TWO8*u)-TWO52; i-=16;
- z=u-cij[i][0].d;
- yy=z*(cij[i][2].d+z*(cij[i][3].d+z*(cij[i][4].d+
- z*(cij[i][5].d+z* cij[i][6].d))));
- t1=cij[i][1].d;
- if (i<112) {
- if (i<48) u2=U21; /* u < 1/4 */
- else u2=U22; } /* 1/4 <= u < 1/2 */
- else {
- if (i<176) u2=U23; /* 1/2 <= u < 3/4 */
- else u2=U24; } /* 3/4 <= u <= 1 */
- if ((y=t1+(yy-u2*t1)) == t1+(yy+u2*t1)) return __signArctan(x,y);
-
- z=u-hij[i][0].d;
- s1=z*(hij[i][11].d+z*(hij[i][12].d+z*(hij[i][13].d+
- z*(hij[i][14].d+z* hij[i][15].d))));
- ADD2(hij[i][9].d,hij[i][10].d,s1,ZERO,s2,ss2,t1,t2)
- MUL2(z,ZERO,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(hij[i][7].d,hij[i][8].d,s1,ss1,s2,ss2,t1,t2)
- MUL2(z,ZERO,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(hij[i][5].d,hij[i][6].d,s1,ss1,s2,ss2,t1,t2)
- MUL2(z,ZERO,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(hij[i][3].d,hij[i][4].d,s1,ss1,s2,ss2,t1,t2)
- MUL2(z,ZERO,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(hij[i][1].d,hij[i][2].d,s1,ss1,s2,ss2,t1,t2)
- if ((y=s2+(ss2-U6*s2)) == s2+(ss2+U6*s2)) return __signArctan(x,y);
-
- return atanMp(x,pr);
+ u = (x < 0) ? -x : x;
+ if (u < C)
+ {
+ if (u < B)
+ {
+ if (u < A)
+ return x;
+ else
+ { /* A <= u < B */
+ v = x * x;
+ yy = d11.d + v * d13.d;
+ yy = d9.d + v * yy;
+ yy = d7.d + v * yy;
+ yy = d5.d + v * yy;
+ yy = d3.d + v * yy;
+ yy *= x * v;
+
+ if ((y = x + (yy - U1 * x)) == x + (yy + U1 * x))
+ return y;
+
+ EMULV (x, x, v, vv, t1, t2, t3, t4, t5); /* v+vv=x^2 */
+
+ s1 = f17.d + v * f19.d;
+ s1 = f15.d + v * s1;
+ s1 = f13.d + v * s1;
+ s1 = f11.d + v * s1;
+ s1 *= v;
+
+ ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
+ MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
+ MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (f5.d, ff5.d, s1, ss1, s2, ss2, t1, t2);
+ MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (f3.d, ff3.d, s1, ss1, s2, ss2, t1, t2);
+ MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (x, 0, s1, ss1, s2, ss2, t1, t2, t3, t4, t5, t6, t7,
+ t8);
+ ADD2 (x, 0, s2, ss2, s1, ss1, t1, t2);
+ if ((y = s1 + (ss1 - U5 * s1)) == s1 + (ss1 + U5 * s1))
+ return y;
+
+ return atanMp (x, pr);
+ }
+ }
+ else
+ { /* B <= u < C */
+ i = (TWO52 + TWO8 * u) - TWO52;
+ i -= 16;
+ z = u - cij[i][0].d;
+ yy = cij[i][5].d + z * cij[i][6].d;
+ yy = cij[i][4].d + z * yy;
+ yy = cij[i][3].d + z * yy;
+ yy = cij[i][2].d + z * yy;
+ yy *= z;
+
+ t1 = cij[i][1].d;
+ if (i < 112)
+ {
+ if (i < 48)
+ u2 = U21; /* u < 1/4 */
+ else
+ u2 = U22;
+ } /* 1/4 <= u < 1/2 */
+ else
+ {
+ if (i < 176)
+ u2 = U23; /* 1/2 <= u < 3/4 */
+ else
+ u2 = U24;
+ } /* 3/4 <= u <= 1 */
+ if ((y = t1 + (yy - u2 * t1)) == t1 + (yy + u2 * t1))
+ return __signArctan (x, y);
+
+ z = u - hij[i][0].d;
+
+ s1 = hij[i][14].d + z * hij[i][15].d;
+ s1 = hij[i][13].d + z * s1;
+ s1 = hij[i][12].d + z * s1;
+ s1 = hij[i][11].d + z * s1;
+ s1 *= z;
+
+ ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
+ MUL2 (z, 0, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
+ MUL2 (z, 0, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (hij[i][5].d, hij[i][6].d, s1, ss1, s2, ss2, t1, t2);
+ MUL2 (z, 0, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (hij[i][3].d, hij[i][4].d, s1, ss1, s2, ss2, t1, t2);
+ MUL2 (z, 0, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (hij[i][1].d, hij[i][2].d, s1, ss1, s2, ss2, t1, t2);
+ if ((y = s2 + (ss2 - U6 * s2)) == s2 + (ss2 + U6 * s2))
+ return __signArctan (x, y);
+
+ return atanMp (x, pr);
+ }
}
- }
- else {
- if (u<D) { /* C <= u < D */
- w=ONE/u;
- EMULV(w,u,t1,t2,t3,t4,t5,t6,t7)
- ww=w*((ONE-t1)-t2);
- i=(TWO52+TWO8*w)-TWO52; i-=16;
- z=(w-cij[i][0].d)+ww;
- yy=HPI1-z*(cij[i][2].d+z*(cij[i][3].d+z*(cij[i][4].d+
- z*(cij[i][5].d+z* cij[i][6].d))));
- t1=HPI-cij[i][1].d;
- if (i<112) u3=U31; /* w < 1/2 */
- else u3=U32; /* w >= 1/2 */
- if ((y=t1+(yy-u3)) == t1+(yy+u3)) return __signArctan(x,y);
-
- DIV2(ONE,ZERO,u,ZERO,w,ww,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
- t1=w-hij[i][0].d;
- EADD(t1,ww,z,zz)
- s1=z*(hij[i][11].d+z*(hij[i][12].d+z*(hij[i][13].d+
- z*(hij[i][14].d+z* hij[i][15].d))));
- ADD2(hij[i][9].d,hij[i][10].d,s1,ZERO,s2,ss2,t1,t2)
- MUL2(z,zz,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(hij[i][7].d,hij[i][8].d,s1,ss1,s2,ss2,t1,t2)
- MUL2(z,zz,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(hij[i][5].d,hij[i][6].d,s1,ss1,s2,ss2,t1,t2)
- MUL2(z,zz,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(hij[i][3].d,hij[i][4].d,s1,ss1,s2,ss2,t1,t2)
- MUL2(z,zz,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(hij[i][1].d,hij[i][2].d,s1,ss1,s2,ss2,t1,t2)
- SUB2(HPI,HPI1,s2,ss2,s1,ss1,t1,t2)
- if ((y=s1+(ss1-U7)) == s1+(ss1+U7)) return __signArctan(x,y);
-
- return atanMp(x,pr);
+ else
+ {
+ if (u < D)
+ { /* C <= u < D */
+ w = 1 / u;
+ EMULV (w, u, t1, t2, t3, t4, t5, t6, t7);
+ ww = w * ((1 - t1) - t2);
+ i = (TWO52 + TWO8 * w) - TWO52;
+ i -= 16;
+ z = (w - cij[i][0].d) + ww;
+
+ yy = cij[i][5].d + z * cij[i][6].d;
+ yy = cij[i][4].d + z * yy;
+ yy = cij[i][3].d + z * yy;
+ yy = cij[i][2].d + z * yy;
+ yy = HPI1 - z * yy;
+
+ t1 = HPI - cij[i][1].d;
+ if (i < 112)
+ u3 = U31; /* w < 1/2 */
+ else
+ u3 = U32; /* w >= 1/2 */
+ if ((y = t1 + (yy - u3)) == t1 + (yy + u3))
+ return __signArctan (x, y);
+
+ DIV2 (1 , 0, u, 0, w, ww, t1, t2, t3, t4, t5, t6, t7, t8, t9,
+ t10);
+ t1 = w - hij[i][0].d;
+ EADD (t1, ww, z, zz);
+
+ s1 = hij[i][14].d + z * hij[i][15].d;
+ s1 = hij[i][13].d + z * s1;
+ s1 = hij[i][12].d + z * s1;
+ s1 = hij[i][11].d + z * s1;
+ s1 *= z;
+
+ ADD2 (hij[i][9].d, hij[i][10].d, s1, 0, s2, ss2, t1, t2);
+ MUL2 (z, zz, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (hij[i][7].d, hij[i][8].d, s1, ss1, s2, ss2, t1, t2);
+ MUL2 (z, zz, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (hij[i][5].d, hij[i][6].d, s1, ss1, s2, ss2, t1, t2);
+ MUL2 (z, zz, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (hij[i][3].d, hij[i][4].d, s1, ss1, s2, ss2, t1, t2);
+ MUL2 (z, zz, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (hij[i][1].d, hij[i][2].d, s1, ss1, s2, ss2, t1, t2);
+ SUB2 (HPI, HPI1, s2, ss2, s1, ss1, t1, t2);
+ if ((y = s1 + (ss1 - U7)) == s1 + (ss1 + U7))
+ return __signArctan (x, y);
+
+ return atanMp (x, pr);
+ }
+ else
+ {
+ if (u < E)
+ { /* D <= u < E */
+ w = 1 / u;
+ v = w * w;
+ EMULV (w, u, t1, t2, t3, t4, t5, t6, t7);
+
+ yy = d11.d + v * d13.d;
+ yy = d9.d + v * yy;
+ yy = d7.d + v * yy;
+ yy = d5.d + v * yy;
+ yy = d3.d + v * yy;
+ yy *= w * v;
+
+ ww = w * ((1 - t1) - t2);
+ ESUB (HPI, w, t3, cor);
+ yy = ((HPI1 + cor) - ww) - yy;
+ if ((y = t3 + (yy - U4)) == t3 + (yy + U4))
+ return __signArctan (x, y);
+
+ DIV2 (1 , 0, u, 0, w, ww, t1, t2, t3, t4, t5, t6, t7, t8,
+ t9, t10);
+ MUL2 (w, ww, w, ww, v, vv, t1, t2, t3, t4, t5, t6, t7, t8);
+
+ s1 = f17.d + v * f19.d;
+ s1 = f15.d + v * s1;
+ s1 = f13.d + v * s1;
+ s1 = f11.d + v * s1;
+ s1 *= v;
+
+ ADD2 (f9.d, ff9.d, s1, 0, s2, ss2, t1, t2);
+ MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (f7.d, ff7.d, s1, ss1, s2, ss2, t1, t2);
+ MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (f5.d, ff5.d, s1, ss1, s2, ss2, t1, t2);
+ MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (f3.d, ff3.d, s1, ss1, s2, ss2, t1, t2);
+ MUL2 (v, vv, s2, ss2, s1, ss1, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (w, ww, s1, ss1, s2, ss2, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (w, ww, s2, ss2, s1, ss1, t1, t2);
+ SUB2 (HPI, HPI1, s1, ss1, s2, ss2, t1, t2);
+
+ if ((y = s2 + (ss2 - U8)) == s2 + (ss2 + U8))
+ return __signArctan (x, y);
+
+ return atanMp (x, pr);
+ }
+ else
+ {
+ /* u >= E */
+ if (x > 0)
+ return HPI;
+ else
+ return MHPI;
+ }
+ }
}
- else {
- if (u<E) { /* D <= u < E */
- w=ONE/u; v=w*w;
- EMULV(w,u,t1,t2,t3,t4,t5,t6,t7)
- yy=w*v*(d3.d+v*(d5.d+v*(d7.d+v*(d9.d+v*(d11.d+v*d13.d)))));
- ww=w*((ONE-t1)-t2);
- ESUB(HPI,w,t3,cor)
- yy=((HPI1+cor)-ww)-yy;
- if ((y=t3+(yy-U4)) == t3+(yy+U4)) return __signArctan(x,y);
-
- DIV2(ONE,ZERO,u,ZERO,w,ww,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
- MUL2(w,ww,w,ww,v,vv,t1,t2,t3,t4,t5,t6,t7,t8)
- s1=v*(f11.d+v*(f13.d+v*(f15.d+v*(f17.d+v*f19.d))));
- ADD2(f9.d,ff9.d,s1,ZERO,s2,ss2,t1,t2)
- MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(f7.d,ff7.d,s1,ss1,s2,ss2,t1,t2)
- MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(f5.d,ff5.d,s1,ss1,s2,ss2,t1,t2)
- MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(f3.d,ff3.d,s1,ss1,s2,ss2,t1,t2)
- MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
- MUL2(w,ww,s1,ss1,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(w,ww,s2,ss2,s1,ss1,t1,t2)
- SUB2(HPI,HPI1,s1,ss1,s2,ss2,t1,t2)
- if ((y=s2+(ss2-U8)) == s2+(ss2+U8)) return __signArctan(x,y);
-
- return atanMp(x,pr);
- }
- else {
- /* u >= E */
- if (x>0) return HPI;
- else return MHPI; }
+}
+
+ /* Final stages. Compute atan(x) by multiple precision arithmetic */
+static double
+atanMp (double x, const int pr[])
+{
+ mp_no mpx, mpy, mpy2, mperr, mpt1, mpy1;
+ double y1, y2;
+ int i, p;
+
+ for (i = 0; i < M; i++)
+ {
+ p = pr[i];
+ __dbl_mp (x, &mpx, p);
+ __mpatan (&mpx, &mpy, p);
+ __dbl_mp (u9[i].d, &mpt1, p);
+ __mul (&mpy, &mpt1, &mperr, p);
+ __add (&mpy, &mperr, &mpy1, p);
+ __sub (&mpy, &mperr, &mpy2, p);
+ __mp_dbl (&mpy1, &y1, p);
+ __mp_dbl (&mpy2, &y2, p);
+ if (y1 == y2)
+ return y1;
}
- }
-
-}
-
- /* Final stages. Compute atan(x) by multiple precision arithmetic */
-static double atanMp(double x,const int pr[]){
- mp_no mpx,mpy,mpy2,mperr,mpt1,mpy1;
- double y1,y2;
- int i,p;
-
-for (i=0; i<M; i++) {
- p = pr[i];
- __dbl_mp(x,&mpx,p); __mpatan(&mpx,&mpy,p);
- __dbl_mp(u9[i].d,&mpt1,p); __mul(&mpy,&mpt1,&mperr,p);
- __add(&mpy,&mperr,&mpy1,p); __sub(&mpy,&mperr,&mpy2,p);
- __mp_dbl(&mpy1,&y1,p); __mp_dbl(&mpy2,&y2,p);
- if (y1==y2) return y1;
- }
- return y1; /*if unpossible to do exact computing */
+ return y1; /*if impossible to do exact computing */
}
#ifdef NO_LONG_DOUBLE
Modified: fsf/trunk/libc/sysdeps/ieee754/dbl-64/s_tan.c
==============================================================================
--- fsf/trunk/libc/sysdeps/ieee754/dbl-64/s_tan.c (original)
+++ fsf/trunk/libc/sysdeps/ieee754/dbl-64/s_tan.c Sat Mar 30 00:01:57 2013
@@ -46,459 +46,782 @@
# define SECTION
#endif
-static double tanMp(double);
-void __mptan(double, mp_no *, int);
+static double tanMp (double);
+void __mptan (double, mp_no *, int);
double
SECTION
-tan(double x) {
+tan (double x)
+{
#include "utan.h"
#include "utan.tbl"
- int ux,i,n;
- double a,da,a2,b,db,c,dc,c1,cc1,c2,cc2,c3,cc3,fi,ffi,gi,pz,s,sy,
- t,t1,t2,t3,t4,t7,t8,t9,t10,w,x2,xn,xx2,y,ya,yya,z0,z,zz,z2,zz2;
+ int ux, i, n;
+ double a, da, a2, b, db, c, dc, c1, cc1, c2, cc2, c3, cc3, fi, ffi, gi, pz,
+ s, sy, t, t1, t2, t3, t4, t7, t8, t9, t10, w, x2, xn, xx2, y, ya, yya, z0,
+ z, zz, z2, zz2;
#ifndef DLA_FMS
- double t5,t6;
+ double t5, t6;
#endif
int p;
- number num,v;
- mp_no mpa,mpt1,mpt2;
+ number num, v;
+ mp_no mpa, mpt1, mpt2;
double retval;
- int __branred(double, double *, double *);
- int __mpranred(double, mp_no *, int);
+ int __branred (double, double *, double *);
+ int __mpranred (double, mp_no *, int);
SET_RESTORE_ROUND_53BIT (FE_TONEAREST);
/* x=+-INF, x=NaN */
- num.d = x; ux = num.i[HIGH_HALF];
- if ((ux&0x7ff00000)==0x7ff00000) {
- if ((ux&0x7fffffff)==0x7ff00000)
- __set_errno (EDOM);
- retval = x-x;
- goto ret;
- }
-
- w=(x<0.0) ? -x : x;
+ num.d = x;
+ ux = num.i[HIGH_HALF];
+ if ((ux & 0x7ff00000) == 0x7ff00000)
+ {
+ if ((ux & 0x7fffffff) == 0x7ff00000)
+ __set_errno (EDOM);
+ retval = x - x;
+ goto ret;
+ }
+
+ w = (x < 0.0) ? -x : x;
/* (I) The case abs(x) <= 1.259e-8 */
- if (w<=g1.d) { retval = x; goto ret; }
+ if (w <= g1.d)
+ {
+ retval = x;
+ goto ret;
+ }
/* (II) The case 1.259e-8 < abs(x) <= 0.0608 */
- if (w<=g2.d) {
-
- /* First stage */
- x2 = x*x;
- t2 = x*x2*(d3.d+x2*(d5.d+x2*(d7.d+x2*(d9.d+x2*d11.d))));
- if ((y=x+(t2-u1.d*t2)) == x+(t2+u1.d*t2)) { retval = y; goto ret; }
-
- /* Second stage */
- c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
- x2*a27.d))))));
- EMULV(x,x,x2,xx2,t1,t2,t3,t4,t5)
- ADD2(a13.d,aa13.d,c1,0.0,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- MUL2(x ,0.0,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(x ,0.0,c2,cc2,c1,cc1,t1,t2)
- if ((y=c1+(cc1-u2.d*c1)) == c1+(cc1+u2.d*c1)) { retval = y; goto ret; }
- retval = tanMp(x);
- goto ret;
- }
+ if (w <= g2.d)
+ {
+
+ /* First stage */
+ x2 = x * x;
+
+ t2 = d9.d + x2 * d11.d;
+ t2 = d7.d + x2 * t2;
+ t2 = d5.d + x2 * t2;
+ t2 = d3.d + x2 * t2;
+ t2 *= x * x2;
+
+ if ((y = x + (t2 - u1.d * t2)) == x + (t2 + u1.d * t2))
+ {
+ retval = y;
+ goto ret;
+ }
+
+ /* Second stage */
+ c1 = a25.d + x2 * a27.d;
+ c1 = a23.d + x2 * c1;
+ c1 = a21.d + x2 * c1;
+ c1 = a19.d + x2 * c1;
+ c1 = a17.d + x2 * c1;
+ c1 = a15.d + x2 * c1;
+ c1 *= x2;
+
+ EMULV (x, x, x2, xx2, t1, t2, t3, t4, t5);
+ ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (x, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (x, 0.0, c2, cc2, c1, cc1, t1, t2);
+ if ((y = c1 + (cc1 - u2.d * c1)) == c1 + (cc1 + u2.d * c1))
+ {
+ retval = y;
+ goto ret;
+ }
+ retval = tanMp (x);
+ goto ret;
+ }
/* (III) The case 0.0608 < abs(x) <= 0.787 */
- if (w<=g3.d) {
-
- /* First stage */
- i = ((int) (mfftnhf.d+TWO8*w));
- z = w-xfg[i][0].d; z2 = z*z; s = (x<0.0) ? MONE : ONE;
- pz = z+z*z2*(e0.d+z2*e1.d);
- fi = xfg[i][1].d; gi = xfg[i][2].d; t2 = pz*(gi+fi)/(gi-pz);
- if ((y=fi+(t2-fi*u3.d))==fi+(t2+fi*u3.d)) { retval = (s*y); goto ret; }
- t3 = (t2<0.0) ? -t2 : t2;
- t4 = fi*ua3.d+t3*ub3.d;
- if ((y=fi+(t2-t4))==fi+(t2+t4)) { retval = (s*y); goto ret; }
-
- /* Second stage */
- ffi = xfg[i][3].d;
- c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
- EMULV(z,z,z2,zz2,t1,t2,t3,t4,t5)
- ADD2(a5.d,aa5.d,c1,0.0,c2,cc2,t1,t2)
- MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
- MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- MUL2(z ,0.0,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(z ,0.0,c2,cc2,c1,cc1,t1,t2)
-
- ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
- MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
- SUB2(1.0,0.0,c3,cc3,c1,cc1,t1,t2)
- DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
-
- if ((y=c3+(cc3-u4.d*c3))==c3+(cc3+u4.d*c3)) { retval = (s*y); goto ret; }
- retval = tanMp(x);
- goto ret;
- }
+ if (w <= g3.d)
+ {
+
+ /* First stage */
+ i = ((int) (mfftnhf.d + TWO8 * w));
+ z = w - xfg[i][0].d;
+ z2 = z * z;
+ s = (x < 0.0) ? -1 : 1;
+ pz = z + z * z2 * (e0.d + z2 * e1.d);
+ fi = xfg[i][1].d;
+ gi = xfg[i][2].d;
+ t2 = pz * (gi + fi) / (gi - pz);
+ if ((y = fi + (t2 - fi * u3.d)) == fi + (t2 + fi * u3.d))
+ {
+ retval = (s * y);
+ goto ret;
+ }
+ t3 = (t2 < 0.0) ? -t2 : t2;
+ t4 = fi * ua3.d + t3 * ub3.d;
+ if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
+ {
+ retval = (s * y);
+ goto ret;
+ }
+
+ /* Second stage */
+ ffi = xfg[i][3].d;
+ c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
+ EMULV (z, z, z2, zz2, t1, t2, t3, t4, t5);
+ ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
+ MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (z, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (z, 0.0, c2, cc2, c1, cc1, t1, t2);
+
+ ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
+ SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);
+ DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
+ t10);
+
+ if ((y = c3 + (cc3 - u4.d * c3)) == c3 + (cc3 + u4.d * c3))
+ {
+ retval = (s * y);
+ goto ret;
+ }
+ retval = tanMp (x);
+ goto ret;
+ }
/* (---) The case 0.787 < abs(x) <= 25 */
- if (w<=g4.d) {
- /* Range reduction by algorithm i */
- t = (x*hpinv.d + toint.d);
- xn = t - toint.d;
- v.d = t;
- t1 = (x - xn*mp1.d) - xn*mp2.d;
- n =v.i[LOW_HALF] & 0x00000001;
- da = xn*mp3.d;
- a=t1-da;
- da = (t1-a)-da;
- if (a<0.0) {ya=-a; yya=-da; sy=MONE;}
- else {ya= a; yya= da; sy= ONE;}
-
- /* (IV),(V) The case 0.787 < abs(x) <= 25, abs(y) <= 1e-7 */
- if (ya<=gy1.d) { retval = tanMp(x); goto ret; }
-
- /* (VI) The case 0.787 < abs(x) <= 25, 1e-7 < abs(y) <= 0.0608 */
- if (ya<=gy2.d) {
- a2 = a*a;
- t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
- if (n) {
- /* First stage -cot */
- EADD(a,t2,b,db)
- DIV2(1.0,0.0,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
- if ((y=c+(dc-u6.d*c))==c+(dc+u6.d*c)) { retval = (-y); goto ret; } }
- else {
- /* First stage tan */
- if ((y=a+(t2-u5.d*a))==a+(t2+u5.d*a)) { retval = y; goto ret; } }
- /* Second stage */
- /* Range reduction by algorithm ii */
- t = (x*hpinv.d + toint.d);
+ if (w <= g4.d)
+ {
+ /* Range reduction by algorithm i */
+ t = (x * hpinv.d + toint.d);
xn = t - toint.d;
v.d = t;
- t1 = (x - xn*mp1.d) - xn*mp2.d;
- n =v.i[LOW_HALF] & 0x00000001;
- da = xn*pp3.d;
- t=t1-da;
- da = (t1-t)-da;
- t1 = xn*pp4.d;
+ t1 = (x - xn * mp1.d) - xn * mp2.d;
+ n = v.i[LOW_HALF] & 0x00000001;
+ da = xn * mp3.d;
+ a = t1 - da;
+ da = (t1 - a) - da;
+ if (a < 0.0)
+ {
+ ya = -a;
+ yya = -da;
+ sy = -1;
+ }
+ else
+ {
+ ya = a;
+ yya = da;
+ sy = 1;
+ }
+
+ /* (IV),(V) The case 0.787 < abs(x) <= 25, abs(y) <= 1e-7 */
+ if (ya <= gy1.d)
+ {
+ retval = tanMp (x);
+ goto ret;
+ }
+
+ /* (VI) The case 0.787 < abs(x) <= 25, 1e-7 < abs(y) <= 0.0608 */
+ if (ya <= gy2.d)
+ {
+ a2 = a * a;
+ t2 = d9.d + a2 * d11.d;
+ t2 = d7.d + a2 * t2;
+ t2 = d5.d + a2 * t2;
+ t2 = d3.d + a2 * t2;
+ t2 = da + a * a2 * t2;
+
+ if (n)
+ {
+ /* First stage -cot */
+ EADD (a, t2, b, db);
+ DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8,
+ t9, t10);
+ if ((y = c + (dc - u6.d * c)) == c + (dc + u6.d * c))
+ {
+ retval = (-y);
+ goto ret;
+ }
+ }
+ else
+ {
+ /* First stage tan */
+ if ((y = a + (t2 - u5.d * a)) == a + (t2 + u5.d * a))
+ {
+ retval = y;
+ goto ret;
+ }
+ }
+ /* Second stage */
+ /* Range reduction by algorithm ii */
+ t = (x * hpinv.d + toint.d);
+ xn = t - toint.d;
+ v.d = t;
+ t1 = (x - xn * mp1.d) - xn * mp2.d;
+ n = v.i[LOW_HALF] & 0x00000001;
+ da = xn * pp3.d;
+ t = t1 - da;
+ da = (t1 - t) - da;
+ t1 = xn * pp4.d;
+ a = t - t1;
+ da = ((t - a) - t1) + da;
+
+ /* Second stage */
+ EADD (a, da, t1, t2);
+ a = t1;
+ da = t2;
+ MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);
+
+ c1 = a25.d + x2 * a27.d;
+ c1 = a23.d + x2 * c1;
+ c1 = a21.d + x2 * c1;
+ c1 = a19.d + x2 * c1;
+ c1 = a17.d + x2 * c1;
+ c1 = a15.d + x2 * c1;
+ c1 *= x2;
+
+ ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);
+
+ if (n)
+ {
+ /* Second stage -cot */
+ DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7,
+ t8, t9, t10);
+ if ((y = c2 + (cc2 - u8.d * c2)) == c2 + (cc2 + u8.d * c2))
+ {
+ retval = (-y);
+ goto ret;
+ }
+ }
+ else
+ {
+ /* Second stage tan */
+ if ((y = c1 + (cc1 - u7.d * c1)) == c1 + (cc1 + u7.d * c1))
+ {
+ retval = y;
+ goto ret;
+ }
+ }
+ retval = tanMp (x);
+ goto ret;
+ }
+
+ /* (VII) The case 0.787 < abs(x) <= 25, 0.0608 < abs(y) <= 0.787 */
+
+ /* First stage */
+ i = ((int) (mfftnhf.d + TWO8 * ya));
+ z = (z0 = (ya - xfg[i][0].d)) + yya;
+ z2 = z * z;
+ pz = z + z * z2 * (e0.d + z2 * e1.d);
+ fi = xfg[i][1].d;
+ gi = xfg[i][2].d;
+
+ if (n)
+ {
+ /* -cot */
+ t2 = pz * (fi + gi) / (fi + pz);
+ if ((y = gi - (t2 - gi * u10.d)) == gi - (t2 + gi * u10.d))
+ {
+ retval = (-sy * y);
+ goto ret;
+ }
+ t3 = (t2 < 0.0) ? -t2 : t2;
+ t4 = gi * ua10.d + t3 * ub10.d;
+ if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
+ {
+ retval = (-sy * y);
+ goto ret;
+ }
+ }
+ else
+ {
+ /* tan */
+ t2 = pz * (gi + fi) / (gi - pz);
+ if ((y = fi + (t2 - fi * u9.d)) == fi + (t2 + fi * u9.d))
+ {
+ retval = (sy * y);
+ goto ret;
+ }
+ t3 = (t2 < 0.0) ? -t2 : t2;
+ t4 = fi * ua9.d + t3 * ub9.d;
+ if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
+ {
+ retval = (sy * y);
+ goto ret;
+ }
+ }
+
+ /* Second stage */
+ ffi = xfg[i][3].d;
+ EADD (z0, yya, z, zz)
+ MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
+ c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
+ ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
+ MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);
+
+ ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
+ SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);
+
+ if (n)
+ {
+ /* -cot */
+ DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
+ t10);
+ if ((y = c3 + (cc3 - u12.d * c3)) == c3 + (cc3 + u12.d * c3))
+ {
+ retval = (-sy * y);
+ goto ret;
+ }
+ }
+ else
+ {
+ /* tan */
+ DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
+ t10);
+ if ((y = c3 + (cc3 - u11.d * c3)) == c3 + (cc3 + u11.d * c3))
+ {
+ retval = (sy * y);
+ goto ret;
+ }
+ }
+
+ retval = tanMp (x);
+ goto ret;
+ }
+
+ /* (---) The case 25 < abs(x) <= 1e8 */
+ if (w <= g5.d)
+ {
+ /* Range reduction by algorithm ii */
+ t = (x * hpinv.d + toint.d);
+ xn = t - toint.d;
+ v.d = t;
+ t1 = (x - xn * mp1.d) - xn * mp2.d;
+ n = v.i[LOW_HALF] & 0x00000001;
+ da = xn * pp3.d;
+ t = t1 - da;
+ da = (t1 - t) - da;
+ t1 = xn * pp4.d;
a = t - t1;
- da = ((t-a)-t1)+da;
+ da = ((t - a) - t1) + da;
+ EADD (a, da, t1, t2);
+ a = t1;
+ da = t2;
+ if (a < 0.0)
+ {
+ ya = -a;
+ yya = -da;
+ sy = -1;
+ }
+ else
+ {
+ ya = a;
+ yya = da;
+ sy = 1;
+ }
+
+ /* (+++) The case 25 < abs(x) <= 1e8, abs(y) <= 1e-7 */
+ if (ya <= gy1.d)
+ {
+ retval = tanMp (x);
+ goto ret;
+ }
+
+ /* (VIII) The case 25 < abs(x) <= 1e8, 1e-7 < abs(y) <= 0.0608 */
+ if (ya <= gy2.d)
+ {
+ a2 = a * a;
+ t2 = d9.d + a2 * d11.d;
+ t2 = d7.d + a2 * t2;
+ t2 = d5.d + a2 * t2;
+ t2 = d3.d + a2 * t2;
+ t2 = da + a * a2 * t2;
+
+ if (n)
+ {
+ /* First stage -cot */
+ EADD (a, t2, b, db);
+ DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8,
+ t9, t10);
+ if ((y = c + (dc - u14.d * c)) == c + (dc + u14.d * c))
+ {
+ retval = (-y);
+ goto ret;
+ }
+ }
+ else
+ {
+ /* First stage tan */
+ if ((y = a + (t2 - u13.d * a)) == a + (t2 + u13.d * a))
+ {
+ retval = y;
+ goto ret;
+ }
+ }
+
+ /* Second stage */
+ MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);
+ c1 = a25.d + x2 * a27.d;
+ c1 = a23.d + x2 * c1;
+ c1 = a21.d + x2 * c1;
+ c1 = a19.d + x2 * c1;
+ c1 = a17.d + x2 * c1;
+ c1 = a15.d + x2 * c1;
+ c1 *= x2;
+
+ ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);
+
+ if (n)
+ {
+ /* Second stage -cot */
+ DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7,
+ t8, t9, t10);
+ if ((y = c2 + (cc2 - u16.d * c2)) == c2 + (cc2 + u16.d * c2))
+ {
+ retval = (-y);
+ goto ret;
+ }
+ }
+ else
+ {
+ /* Second stage tan */
+ if ((y = c1 + (cc1 - u15.d * c1)) == c1 + (cc1 + u15.d * c1))
+ {
+ retval = (y);
+ goto ret;
+ }
+ }
+ retval = tanMp (x);
+ goto ret;
+ }
+
+ /* (IX) The case 25 < abs(x) <= 1e8, 0.0608 < abs(y) <= 0.787 */
+ /* First stage */
+ i = ((int) (mfftnhf.d + TWO8 * ya));
+ z = (z0 = (ya - xfg[i][0].d)) + yya;
+ z2 = z * z;
+ pz = z + z * z2 * (e0.d + z2 * e1.d);
+ fi = xfg[i][1].d;
+ gi = xfg[i][2].d;
+
+ if (n)
+ {
+ /* -cot */
+ t2 = pz * (fi + gi) / (fi + pz);
+ if ((y = gi - (t2 - gi * u18.d)) == gi - (t2 + gi * u18.d))
+ {
+ retval = (-sy * y);
+ goto ret;
+ }
+ t3 = (t2 < 0.0) ? -t2 : t2;
+ t4 = gi * ua18.d + t3 * ub18.d;
+ if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
+ {
+ retval = (-sy * y);
+ goto ret;
+ }
+ }
+ else
+ {
+ /* tan */
+ t2 = pz * (gi + fi) / (gi - pz);
+ if ((y = fi + (t2 - fi * u17.d)) == fi + (t2 + fi * u17.d))
+ {
+ retval = (sy * y);
+ goto ret;
+ }
+ t3 = (t2 < 0.0) ? -t2 : t2;
+ t4 = fi * ua17.d + t3 * ub17.d;
+ if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
+ {
+ retval = (sy * y);
+ goto ret;
+ }
+ }
/* Second stage */
- EADD(a,da,t1,t2) a=t1; da=t2;
- MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
- c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
- x2*a27.d))))));
- ADD2(a13.d,aa13.d,c1,0.0,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a ,da ,c2,cc2,c1,cc1,t1,t2)
-
- if (n) {
- /* Second stage -cot */
- DIV2(1.0,0.0,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
- if ((y=c2+(cc2-u8.d*c2)) == c2+(cc2+u8.d*c2)) { retval = (-y); goto ret; } }
- else {
- /* Second stage tan */
- if ((y=c1+(cc1-u7.d*c1)) == c1+(cc1+u7.d*c1)) { retval = y; goto ret; } }
- retval = tanMp(x);
+ ffi = xfg[i][3].d;
+ EADD (z0, yya, z, zz);
+ MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
+ c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
+ ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
+ MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);
+
+ ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
+ SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);
+
+ if (n)
+ {
+ /* -cot */
+ DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
+ t10);
+ if ((y = c3 + (cc3 - u20.d * c3)) == c3 + (cc3 + u20.d * c3))
+ {
+ retval = (-sy * y);
+ goto ret;
+ }
+ }
+ else
+ {
+ /* tan */
+ DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
+ t10);
+ if ((y = c3 + (cc3 - u19.d * c3)) == c3 + (cc3 + u19.d * c3))
+ {
+ retval = (sy * y);
+ goto ret;
+ }
+ }
+ retval = tanMp (x);
goto ret;
}
-
- /* (VII) The case 0.787 < abs(x) <= 25, 0.0608 < abs(y) <= 0.787 */
-
- /* First stage */
- i = ((int) (mfftnhf.d+TWO8*ya));
- z = (z0=(ya-xfg[i][0].d))+yya; z2 = z*z;
- pz = z+z*z2*(e0.d+z2*e1.d);
- fi = xfg[i][1].d; gi = xfg[i][2].d;
-
- if (n) {
- /* -cot */
- t2 = pz*(fi+gi)/(fi+pz);
- if ((y=gi-(t2-gi*u10.d))==gi-(t2+gi*u10.d)) { retval = (-sy*y); goto ret; }
- t3 = (t2<0.0) ? -t2 : t2;
- t4 = gi*ua10.d+t3*ub10.d;
- if ((y=gi-(t2-t4))==gi-(t2+t4)) { retval = (-sy*y); goto ret; } }
- else {
- /* tan */
- t2 = pz*(gi+fi)/(gi-pz);
- if ((y=fi+(t2-fi*u9.d))==fi+(t2+fi*u9.d)) { retval = (sy*y); goto ret; }
- t3 = (t2<0.0) ? -t2 : t2;
- t4 = fi*ua9.d+t3*ub9.d;
- if ((y=fi+(t2-t4))==fi+(t2+t4)) { retval = (sy*y); goto ret; } }
-
- /* Second stage */
- ffi = xfg[i][3].d;
- EADD(z0,yya,z,zz)
- MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
- c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
- ADD2(a5.d,aa5.d,c1,0.0,c2,cc2,t1,t2)
- MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
- MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)
-
- ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
- MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
- SUB2(1.0,0.0,c3,cc3,c1,cc1,t1,t2)
-
- if (n) {
- /* -cot */
- DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
- if ((y=c3+(cc3-u12.d*c3))==c3+(cc3+u12.d*c3)) { retval = (-sy*y); goto ret; } }
- else {
- /* tan */
- DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
- if ((y=c3+(cc3-u11.d*c3))==c3+(cc3+u11.d*c3)) { retval = (sy*y); goto ret; } }
-
- retval = tanMp(x);
- goto ret;
- }
-
- /* (---) The case 25 < abs(x) <= 1e8 */
- if (w<=g5.d) {
- /* Range reduction by algorithm ii */
- t = (x*hpinv.d + toint.d);
- xn = t - toint.d;
- v.d = t;
- t1 = (x - xn*mp1.d) - xn*mp2.d;
- n =v.i[LOW_HALF] & 0x00000001;
- da = xn*pp3.d;
- t=t1-da;
- da = (t1-t)-da;
- t1 = xn*pp4.d;
- a = t - t1;
- da = ((t-a)-t1)+da;
- EADD(a,da,t1,t2) a=t1; da=t2;
- if (a<0.0) {ya=-a; yya=-da; sy=MONE;}
- else {ya= a; yya= da; sy= ONE;}
-
- /* (+++) The case 25 < abs(x) <= 1e8, abs(y) <= 1e-7 */
- if (ya<=gy1.d) { retval = tanMp(x); goto ret; }
-
- /* (VIII) The case 25 < abs(x) <= 1e8, 1e-7 < abs(y) <= 0.0608 */
- if (ya<=gy2.d) {
- a2 = a*a;
- t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
- if (n) {
- /* First stage -cot */
- EADD(a,t2,b,db)
- DIV2(1.0,0.0,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
- if ((y=c+(dc-u14.d*c))==c+(dc+u14.d*c)) { retval = (-y); goto ret; } }
- else {
- /* First stage tan */
- if ((y=a+(t2-u13.d*a))==a+(t2+u13.d*a)) { retval = y; goto ret; } }
-
- /* Second stage */
- MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
- c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
- x2*a27.d))))));
- ADD2(a13.d,aa13.d,c1,0.0,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a ,da ,c2,cc2,c1,cc1,t1,t2)
-
- if (n) {
- /* Second stage -cot */
- DIV2(1.0,0.0,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
- if ((y=c2+(cc2-u16.d*c2)) == c2+(cc2+u16.d*c2)) { retval = (-y); goto ret; } }
- else {
- /* Second stage tan */
- if ((y=c1+(cc1-u15.d*c1)) == c1+(cc1+u15.d*c1)) { retval = (y); goto ret; } }
- retval = tanMp(x);
- goto ret;
- }
-
- /* (IX) The case 25 < abs(x) <= 1e8, 0.0608 < abs(y) <= 0.787 */
- /* First stage */
- i = ((int) (mfftnhf.d+TWO8*ya));
- z = (z0=(ya-xfg[i][0].d))+yya; z2 = z*z;
- pz = z+z*z2*(e0.d+z2*e1.d);
- fi = xfg[i][1].d; gi = xfg[i][2].d;
-
- if (n) {
- /* -cot */
- t2 = pz*(fi+gi)/(fi+pz);
- if ((y=gi-(t2-gi*u18.d))==gi-(t2+gi*u18.d)) { retval = (-sy*y); goto ret; }
- t3 = (t2<0.0) ? -t2 : t2;
- t4 = gi*ua18.d+t3*ub18.d;
- if ((y=gi-(t2-t4))==gi-(t2+t4)) { retval = (-sy*y); goto ret; } }
- else {
- /* tan */
- t2 = pz*(gi+fi)/(gi-pz);
- if ((y=fi+(t2-fi*u17.d))==fi+(t2+fi*u17.d)) { retval = (sy*y); goto ret; }
- t3 = (t2<0.0) ? -t2 : t2;
- t4 = fi*ua17.d+t3*ub17.d;
- if ((y=fi+(t2-t4))==fi+(t2+t4)) { retval = (sy*y); goto ret; } }
-
- /* Second stage */
- ffi = xfg[i][3].d;
- EADD(z0,yya,z,zz)
- MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
- c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
- ADD2(a5.d,aa5.d,c1,0.0,c2,cc2,t1,t2)
- MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
- MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)
-
- ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
- MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
- SUB2(1.0,0.0,c3,cc3,c1,cc1,t1,t2)
-
- if (n) {
- /* -cot */
- DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
- if ((y=c3+(cc3-u20.d*c3))==c3+(cc3+u20.d*c3)) { retval = (-sy*y); goto ret; } }
- else {
- /* tan */
- DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
- if ((y=c3+(cc3-u19.d*c3))==c3+(cc3+u19.d*c3)) { retval = (sy*y); goto ret; } }
- retval = tanMp(x);
- goto ret;
- }
/* (---) The case 1e8 < abs(x) < 2**1024 */
/* Range reduction by algorithm iii */
- n = (__branred(x,&a,&da)) & 0x00000001;
- EADD(a,da,t1,t2) a=t1; da=t2;
- if (a<0.0) {ya=-a; yya=-da; sy=MONE;}
- else {ya= a; yya= da; sy= ONE;}
+ n = (__branred (x, &a, &da)) & 0x00000001;
+ EADD (a, da, t1, t2);
+ a = t1;
+ da = t2;
+ if (a < 0.0)
+ {
+ ya = -a;
+ yya = -da;
+ sy = -1;
+ }
+ else
+ {
+ ya = a;
+ yya = da;
+ sy = 1;
+ }
/* (+++) The case 1e8 < abs(x) < 2**1024, abs(y) <= 1e-7 */
- if (ya<=gy1.d) { retval = tanMp(x); goto ret; }
+ if (ya <= gy1.d)
+ {
+ retval = tanMp (x);
+ goto ret;
+ }
/* (X) The case 1e8 < abs(x) < 2**1024, 1e-7 < abs(y) <= 0.0608 */
- if (ya<=gy2.d) {
- a2 = a*a;
- t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
- if (n) {
- /* First stage -cot */
- EADD(a,t2,b,db)
- DIV2(1.0,0.0,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
- if ((y=c+(dc-u22.d*c))==c+(dc+u22.d*c)) { retval = (-y); goto ret; } }
- else {
- /* First stage tan */
- if ((y=a+(t2-u21.d*a))==a+(t2+u21.d*a)) { retval = y; goto ret; } }
-
- /* Second stage */
- /* Reduction by algorithm iv */
- p=10; n = (__mpranred(x,&mpa,p)) & 0x00000001;
- __mp_dbl(&mpa,&a,p); __dbl_mp(a,&mpt1,p);
- __sub(&mpa,&mpt1,&mpt2,p); __mp_dbl(&mpt2,&da,p);
-
- MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
- c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
- x2*a27.d))))));
- ADD2(a13.d,aa13.d,c1,0.0,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
- MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a ,da ,c2,cc2,c1,cc1,t1,t2)
-
- if (n) {
- /* Second stage -cot */
- DIV2(1.0,0.0,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
- if ((y=c2+(cc2-u24.d*c2)) == c2+(cc2+u24.d*c2)) { retval = (-y); goto ret; } }
- else {
- /* Second stage tan */
- if ((y=c1+(cc1-u23.d*c1)) == c1+(cc1+u23.d*c1)) { retval = y; goto ret; } }
- retval = tanMp(x);
- goto ret;
- }
+ if (ya <= gy2.d)
+ {
+ a2 = a * a;
+ t2 = d9.d + a2 * d11.d;
+ t2 = d7.d + a2 * t2;
+ t2 = d5.d + a2 * t2;
+ t2 = d3.d + a2 * t2;
+ t2 = da + a * a2 * t2;
+ if (n)
+ {
+ /* First stage -cot */
+ EADD (a, t2, b, db);
+ DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8, t9,
+ t10);
+ if ((y = c + (dc - u22.d * c)) == c + (dc + u22.d * c))
+ {
+ retval = (-y);
+ goto ret;
+ }
+ }
+ else
+ {
+ /* First stage tan */
+ if ((y = a + (t2 - u21.d * a)) == a + (t2 + u21.d * a))
+ {
+ retval = y;
+ goto ret;
+ }
+ }
+
+ /* Second stage */
+ /* Reduction by algorithm iv */
+ p = 10;
+ n = (__mpranred (x, &mpa, p)) & 0x00000001;
+ __mp_dbl (&mpa, &a, p);
+ __dbl_mp (a, &mpt1, p);
+ __sub (&mpa, &mpt1, &mpt2, p);
+ __mp_dbl (&mpt2, &da, p);
+
+ MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);
+
+ c1 = a25.d + x2 * a27.d;
+ c1 = a23.d + x2 * c1;
+ c1 = a21.d + x2 * c1;
+ c1 = a19.d + x2 * c1;
+ c1 = a17.d + x2 * c1;
+ c1 = a15.d + x2 * c1;
+ c1 *= x2;
+
+ ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);
+
+ if (n)
+ {
+ /* Second stage -cot */
+ DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8,
+ t9, t10);
+ if ((y = c2 + (cc2 - u24.d * c2)) == c2 + (cc2 + u24.d * c2))
+ {
+ retval = (-y);
+ goto ret;
+ }
+ }
+ else
+ {
+ /* Second stage tan */
+ if ((y = c1 + (cc1 - u23.d * c1)) == c1 + (cc1 + u23.d * c1))
+ {
+ retval = y;
+ goto ret;
+ }
+ }
+ retval = tanMp (x);
+ goto ret;
+ }
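The polynomials in this block are the same ones the old code evaluated as single nested expressions; the patch only flattens them into one Horner step per line so each step fits a formatted line. A minimal sketch of that scheme with generic coefficients follows (horner5 is a hypothetical helper, not part of glibc):

static double
horner5 (double a2, double d3, double d5, double d7, double d9, double d11)
{
  /* Evaluate d3 + a2*(d5 + a2*(d7 + a2*(d9 + a2*d11))) one step at a time,
     exactly as the rewritten t2/c1 sequences above do.  */
  double t = d9 + a2 * d11;
  t = d7 + a2 * t;
  t = d5 + a2 * t;
  return d3 + a2 * t;
}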
/* (XI) The case 1e8 < abs(x) < 2**1024, 0.0608 < abs(y) <= 0.787 */
/* First stage */
- i = ((int) (mfftnhf.d+TWO8*ya));
- z = (z0=(ya-xfg[i][0].d))+yya; z2 = z*z;
- pz = z+z*z2*(e0.d+z2*e1.d);
- fi = xfg[i][1].d; gi = xfg[i][2].d;
-
- if (n) {
- /* -cot */
- t2 = pz*(fi+gi)/(fi+pz);
- if ((y=gi-(t2-gi*u26.d))==gi-(t2+gi*u26.d)) { retval = (-sy*y); goto ret; }
- t3 = (t2<0.0) ? -t2 : t2;
- t4 = gi*ua26.d+t3*ub26.d;
- if ((y=gi-(t2-t4))==gi-(t2+t4)) { retval = (-sy*y); goto ret; } }
- else {
- /* tan */
- t2 = pz*(gi+fi)/(gi-pz);
- if ((y=fi+(t2-fi*u25.d))==fi+(t2+fi*u25.d)) { retval = (sy*y); goto ret; }
- t3 = (t2<0.0) ? -t2 : t2;
- t4 = fi*ua25.d+t3*ub25.d;
- if ((y=fi+(t2-t4))==fi+(t2+t4)) { retval = (sy*y); goto ret; } }
+ i = ((int) (mfftnhf.d + TWO8 * ya));
+ z = (z0 = (ya - xfg[i][0].d)) + yya;
+ z2 = z * z;
+ pz = z + z * z2 * (e0.d + z2 * e1.d);
+ fi = xfg[i][1].d;
+ gi = xfg[i][2].d;
+
+ if (n)
+ {
+ /* -cot */
+ t2 = pz * (fi + gi) / (fi + pz);
+ if ((y = gi - (t2 - gi * u26.d)) == gi - (t2 + gi * u26.d))
+ {
+ retval = (-sy * y);
+ goto ret;
+ }
+ t3 = (t2 < 0.0) ? -t2 : t2;
+ t4 = gi * ua26.d + t3 * ub26.d;
+ if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
+ {
+ retval = (-sy * y);
+ goto ret;
+ }
+ }
+ else
+ {
+ /* tan */
+ t2 = pz * (gi + fi) / (gi - pz);
+ if ((y = fi + (t2 - fi * u25.d)) == fi + (t2 + fi * u25.d))
+ {
+ retval = (sy * y);
+ goto ret;
+ }
+ t3 = (t2 < 0.0) ? -t2 : t2;
+ t4 = fi * ua25.d + t3 * ub25.d;
+ if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
+ {
+ retval = (sy * y);
+ goto ret;
+ }
+ }
/* Second stage */
ffi = xfg[i][3].d;
- EADD(z0,yya,z,zz)
- MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
- c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
- ADD2(a5.d,aa5.d,c1,0.0,c2,cc2,t1,t2)
- MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
- MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
- MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
- ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)
-
- ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
- MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
- SUB2(1.0,0.0,c3,cc3,c1,cc1,t1,t2)
-
- if (n) {
- /* -cot */
- DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
- if ((y=c3+(cc3-u28.d*c3))==c3+(cc3+u28.d*c3)) { retval = (-sy*y); goto ret; } }
- else {
- /* tan */
- DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
- if ((y=c3+(cc3-u27.d*c3))==c3+(cc3+u27.d*c3)) { retval = (sy*y); goto ret; } }
- retval = tanMp(x);
+ EADD (z0, yya, z, zz);
+ MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
+ c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
+ ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
+ MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
+ MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
+ ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);
+
+ ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
+ MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
+ SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);
+
+ if (n)
+ {
+ /* -cot */
+ DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
+ t10);
+ if ((y = c3 + (cc3 - u28.d * c3)) == c3 + (cc3 + u28.d * c3))
+ {
+ retval = (-sy * y);
+ goto ret;
+ }
+ }
+ else
+ {
+ /* tan */
+ DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
+ t10);
+ if ((y = c3 + (cc3 - u27.d * c3)) == c3 + (cc3 + u27.d * c3))
+ {
+ retval = (sy * y);
+ goto ret;
+ }
+ }
+ retval = tanMp (x);
goto ret;
- ret:
+ret:
return retval;
}
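Throughout the function, a candidate result c with correction term cc is accepted only when its lower and upper error bounds round to the same double; otherwise control falls through to the multi-precision tanMp path. A minimal sketch of that acceptance test, with hypothetical names (result_ok, U_BOUND) and an assumed error bound in place of the per-case constants u19.d .. u28.d:

#include <stdbool.h>

#define U_BOUND 1.0e-16		/* hypothetical relative error bound */

static bool
result_ok (double c, double cc, double *y)
{
  double lo = c + (cc - U_BOUND * c);
  double hi = c + (cc + U_BOUND * c);
  if (lo == hi)			/* both error bounds round to the same double */
    {
      *y = lo;
      return true;		/* fast-path result is good enough */
    }
  return false;			/* caller should take the slower, more precise path */
}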
@@ -507,14 +830,14 @@
/* and converts result back to double */
static double
SECTION
-tanMp(double x)
+tanMp (double x)
{
int p;
double y;
mp_no mpy;
- p=32;
- __mptan(x, &mpy, p);
- __mp_dbl(&mpy,&y,p);
+ p = 32;
+ __mptan (x, &mpy, p);
+ __mp_dbl (&mpy, &y, p);
return y;
}
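tanMp above is that last-resort path: evaluate tan with 32 mp digits of precision and round the result back to double. A rough stand-in for the same structure, using long double instead of the mp_no machinery purely for illustration (tan_slow is a hypothetical name, not the glibc routine):

#include <math.h>

static double
tan_slow (double x)
{
  /* Evaluate in a wider format, then round back to double -- the same
     shape as __mptan followed by __mp_dbl.  */
  long double y = tanl ((long double) x);
  return (double) y;
}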
Modified: fsf/trunk/libc/sysdeps/powerpc/power4/fpu/mpa-arch.h
==============================================================================
--- fsf/trunk/libc/sysdeps/powerpc/power4/fpu/mpa-arch.h (original)
+++ fsf/trunk/libc/sysdeps/powerpc/power4/fpu/mpa-arch.h Sat Mar 30 00:01:57 2013
@@ -40,7 +40,7 @@
({ \
double u = ((x) + TWO52) - TWO52; \
if (u > (x)) \
- u -= ONE; \
+ u -= 1; \
(r) = u; \
(x) -= u; \
})
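The macro touched here extracts the integer part of a non-negative double below 2^52 by adding and subtracting 2^52, then stepping down by one when round-to-nearest overshoots; the patch simply writes that correction as the literal 1 instead of the symbolic ONE. A standalone sketch of the same trick under those assumptions (split_int is a hypothetical name):

#define TWO52 0x1p52	/* 2^52: adding this forces rounding to an integer */

static void
split_int (double *x, double *r)
{
  double u = (*x + TWO52) - TWO52;	/* nearest integer to *x */
  if (u > *x)
    u -= 1;				/* overshoot: step down to floor (*x) */
  *r = u;				/* integer part */
  *x -= u;				/* fractional remainder */
}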
Modified: fsf/trunk/libc/sysdeps/powerpc/power4/fpu/mpa.c
==============================================================================
--- fsf/trunk/libc/sysdeps/powerpc/power4/fpu/mpa.c (original)
+++ fsf/trunk/libc/sysdeps/powerpc/power4/fpu/mpa.c Sat Mar 30 00:01:57 2013
@@ -35,15 +35,15 @@
double u, zk, zk2;
/* Is z=0? */
- if (__glibc_unlikely (X[0] * Y[0] == ZERO))
- {
- Z[0] = ZERO;
+ if (__glibc_unlikely (X[0] * Y[0] == 0))
+ {
+ Z[0] = 0;
return;
}
/* Multiply, add and carry */
k2 = (p2 < 3) ? p2 + p2 : p2 + 3;
- zk = Z[k2] = ZERO;
+ zk = Z[k2] = 0;
for (k = k2; k > 1;)
{
if (k > p2)
@@ -101,7 +101,7 @@
int e = EX + EY;
/* Is there a carry beyond the most significant digit? */
- if (Z[1] == ZERO)
+ if (Z[1] == 0)
{
for (i = 1; i <= p2; i++)
Z[i] = Z[i + 1];
@@ -123,24 +123,24 @@
double u, yk;
/* Is z=0? */
- if (__glibc_unlikely (X[0] == ZERO))
- {
- Y[0] = ZERO;
+ if (__glibc_unlikely (X[0] == 0))
+ {
+ Y[0] = 0;
return;
}
/* We need not iterate through all X's since it's pointless to
multiply zeroes. */
for (ip = p; ip > 0; ip--)
- if (X[ip] != ZERO)
+ if (X[ip] != 0)
break;
k = (__glibc_unlikely (p < 3)) ? p + p : p + 3;
while (k > 2 * ip + 1)
- Y[k--] = ZERO;
-
- yk = ZERO;
+ Y[k--] = 0;
+
+ yk = 0;
while (k > p)
{
@@ -204,7 +204,7 @@
int e = EX * 2;
/* Is there a carry beyond the most significant digit? */
- if (__glibc_unlikely (Y[1] == ZERO))
+ if (__glibc_unlikely (Y[1] == 0))
{
for (i = 1; i <= p; i++)
Y[i] = Y[i + 1];
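The hunks in __mul and __sqr shown here all make the same substitution of 0 for the old ZERO macro; the surrounding logic, including the post-carry normalization, is unchanged. A minimal sketch of that normalization step, with hypothetical names (digits, ndigits) standing in for the Z[]/Y[] arrays and the matching exponent adjustment elided:

static void
normalize (double *digits, int ndigits)
{
  /* If no carry reached the most significant digit it is zero: shift
     every digit up one place (the caller keeps ndigits + 1 slots) and
     adjust the stored exponent accordingly (not shown here).  */
  if (digits[1] == 0)
    for (int i = 1; i <= ndigits; i++)
      digits[i] = digits[i + 1];
}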
_______________________________________________
Commits mailing list
Commits@xxxxxxxxxx
http://eglibc.org/cgi-bin/mailman/listinfo/commits