- To: commits@xxxxxxxxxx
- Subject: [Commits] r15585 - in /trunk: ./ libc/ libc/math/ libc/sysdeps/i386/fpu/ libc/sysdeps/ieee754/dbl-64/ libc/sysdeps/ieee754/flt-32/ lib...
- From: joseph@xxxxxxxxxx
- Date: Wed, 26 Oct 2011 16:46:38 -0000
Author: joseph
Date: Wed Oct 26 16:46:37 2011
New Revision: 15585
Log:
Merge changes between r15557 and r15584 from /fsf/trunk.
Added:
trunk/libc/sysdeps/x86_64/fpu/multiarch/brandred-avx.c
- copied unchanged from r15584, fsf/trunk/libc/sysdeps/x86_64/fpu/multiarch/brandred-avx.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/doasin-avx.c
- copied unchanged from r15584, fsf/trunk/libc/sysdeps/x86_64/fpu/multiarch/doasin-avx.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/dosincos-avx.c
- copied unchanged from r15584, fsf/trunk/libc/sysdeps/x86_64/fpu/multiarch/dosincos-avx.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/e_asin-avx.c
- copied unchanged from r15584, fsf/trunk/libc/sysdeps/x86_64/fpu/multiarch/e_asin-avx.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/e_atan2-avx.c
- copied unchanged from r15584, fsf/trunk/libc/sysdeps/x86_64/fpu/multiarch/e_atan2-avx.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/e_exp-avx.c
- copied unchanged from r15584, fsf/trunk/libc/sysdeps/x86_64/fpu/multiarch/e_exp-avx.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/e_log-avx.c
- copied unchanged from r15584, fsf/trunk/libc/sysdeps/x86_64/fpu/multiarch/e_log-avx.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/mpa-avx.c
- copied unchanged from r15584, fsf/trunk/libc/sysdeps/x86_64/fpu/multiarch/mpa-avx.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/mpatan-avx.c
- copied unchanged from r15584, fsf/trunk/libc/sysdeps/x86_64/fpu/multiarch/mpatan-avx.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/mpatan2-avx.c
- copied unchanged from r15584, fsf/trunk/libc/sysdeps/x86_64/fpu/multiarch/mpatan2-avx.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/mpexp-avx.c
- copied unchanged from r15584, fsf/trunk/libc/sysdeps/x86_64/fpu/multiarch/mpexp-avx.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/mplog-avx.c
- copied unchanged from r15584, fsf/trunk/libc/sysdeps/x86_64/fpu/multiarch/mplog-avx.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/mpsqrt-avx.c
- copied unchanged from r15584, fsf/trunk/libc/sysdeps/x86_64/fpu/multiarch/mpsqrt-avx.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/mptan-avx.c
- copied unchanged from r15584, fsf/trunk/libc/sysdeps/x86_64/fpu/multiarch/mptan-avx.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/s_atan-avx.c
- copied unchanged from r15584, fsf/trunk/libc/sysdeps/x86_64/fpu/multiarch/s_atan-avx.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/s_sin-avx.c
- copied unchanged from r15584, fsf/trunk/libc/sysdeps/x86_64/fpu/multiarch/s_sin-avx.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/s_tan-avx.c
- copied unchanged from r15584, fsf/trunk/libc/sysdeps/x86_64/fpu/multiarch/s_tan-avx.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/sincos32-avx.c
- copied unchanged from r15584, fsf/trunk/libc/sysdeps/x86_64/fpu/multiarch/sincos32-avx.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/slowexp-avx.c
- copied unchanged from r15584, fsf/trunk/libc/sysdeps/x86_64/fpu/multiarch/slowexp-avx.c
trunk/ports/sysdeps/m68k/m680x0/fpu/math_private.h
- copied unchanged from r15584, fsf/trunk/ports/sysdeps/m68k/m680x0/fpu/math_private.h
trunk/ports/sysdeps/m68k/m680x0/fpu/sincostab.c
- copied unchanged from r15584, fsf/trunk/ports/sysdeps/m68k/m680x0/fpu/sincostab.c
Modified:
trunk/ (props changed)
trunk/libc/ChangeLog
trunk/libc/math/math_private.h
trunk/libc/sysdeps/i386/fpu/math_private.h
trunk/libc/sysdeps/ieee754/dbl-64/e_rem_pio2.c
trunk/libc/sysdeps/ieee754/dbl-64/s_round.c
trunk/libc/sysdeps/ieee754/flt-32/e_j0f.c
trunk/libc/sysdeps/ieee754/flt-32/s_roundf.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/Makefile
trunk/libc/sysdeps/x86_64/fpu/multiarch/e_asin.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/e_atan2.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/e_exp.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/e_log.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/s_atan.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/s_sin.c
trunk/libc/sysdeps/x86_64/fpu/multiarch/s_tan.c
trunk/libc/sysdeps/x86_64/multiarch/init-arch.h
trunk/libc/sysdeps/x86_64/multiarch/strcmp-sse42.S
trunk/ports/ChangeLog.arm
trunk/ports/ChangeLog.m68k
trunk/ports/sysdeps/arm/dl-machine.h
trunk/ports/sysdeps/unix/arm/sysdep.S
Propchange: trunk/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Oct 26 16:46:37 2011
@@ -1,1 +1,1 @@
-/fsf/trunk:15224-15557
+/fsf/trunk:15224-15584
Modified: trunk/libc/ChangeLog
==============================================================================
--- trunk/libc/ChangeLog (original)
+++ trunk/libc/ChangeLog Wed Oct 26 16:46:37 2011
@@ -1,3 +1,53 @@
+2011-10-26 Andreas Schwab <schwab@xxxxxxxxxx>
+
+ * sysdeps/ieee754/flt-32/e_j0f.c: Fix use of math_force_eval.
+ * sysdeps/ieee754/dbl-64/s_round.c: Likewise.
+ * sysdeps/ieee754/flt-32/s_roundf.c: Likewise.
+
+ * math/math_private.h (math_force_eval): Allow non-addressable
+ arguments.
+ * sysdeps/i386/fpu/math_private.h (math_force_eval): Likewise.
+
+2011-10-25 Ulrich Drepper <drepper@xxxxxxxxx>
+
+ * sysdeps/ieee754/dbl-64/e_rem_pio2.c: Comment everything out, the
+ file is not needed.
+
+ * sysdeps/x86_64/fpu/multiarch/e_asin.c: Support AVX variants.
+ * sysdeps/x86_64/fpu/multiarch/e_atan2.c: Likewise.
+ * sysdeps/x86_64/fpu/multiarch/e_exp.c: Likewise.
+ * sysdeps/x86_64/fpu/multiarch/e_log.c: Likewise.
+ * sysdeps/x86_64/fpu/multiarch/s_atan.c: Likewise.
+ * sysdeps/x86_64/fpu/multiarch/s_sin.c: Likewise.
+ * sysdeps/x86_64/fpu/multiarch/s_tan.c: Likewise.
+ * sysdeps/x86_64/fpu/multiarch/Makefile: Fix some CFLAGS-* variables.
+ Add AVX variants.
+ * sysdeps/x86_64/fpu/multiarch/brandred-avx.c: New file.
+ * sysdeps/x86_64/fpu/multiarch/doasin-avx.c: New file.
+ * sysdeps/x86_64/fpu/multiarch/dosincos-avx.c: New file.
+ * sysdeps/x86_64/fpu/multiarch/e_asin-avx.c: New file.
+ * sysdeps/x86_64/fpu/multiarch/e_atan2-avx.c: New file.
+ * sysdeps/x86_64/fpu/multiarch/e_exp-avx.c: New file.
+ * sysdeps/x86_64/fpu/multiarch/e_log-avx.c: New file.
+ * sysdeps/x86_64/fpu/multiarch/mpa-avx.c: New file.
+ * sysdeps/x86_64/fpu/multiarch/mpatan-avx.c: New file.
+ * sysdeps/x86_64/fpu/multiarch/mpatan2-avx.c: New file.
+ * sysdeps/x86_64/fpu/multiarch/mpexp-avx.c: New file.
+ * sysdeps/x86_64/fpu/multiarch/mplog-avx.c: New file.
+ * sysdeps/x86_64/fpu/multiarch/mpsqrt-avx.c: New file.
+ * sysdeps/x86_64/fpu/multiarch/mptan-avx.c: New file.
+ * sysdeps/x86_64/fpu/multiarch/s_atan-avx.c: New file.
+ * sysdeps/x86_64/fpu/multiarch/s_sin-avx.c: New file.
+ * sysdeps/x86_64/fpu/multiarch/s_tan-avx.c: New file.
+ * sysdeps/x86_64/fpu/multiarch/sincos32-avx.c: New file.
+ * sysdeps/x86_64/fpu/multiarch/slowexp-avx.c: New file.
+
+ * sysdeps/x86_64/multiarch/init-arch.h: Make bit_* macros available
+ all the time. Define bit_AVX. Define HAS_* macros using bit_* macros.
+
+ * sysdeps/x86_64/multiarch/strcmp-sse42.S: Move common code to earlier
+ place. Use VEX encoding when compiling for AVX.
+
2011-10-25 Andreas Schwab <schwab@xxxxxxxxxx>
* wcsmbs/wcscmp.c (WCSCMP): Compare as wchar_t, not wint_t.
@@ -8,7 +58,7 @@
2011-10-25 Ulrich Drepper <drepper@xxxxxxxxx>
* sysdeps/ieee754/dbl-64/e_atanh.c: Use math_force_eval instead of a
- useful if() expression.
+ useless if() expression.
* sysdeps/ieee754/dbl-64/e_j0.c: Likewise.
* sysdeps/ieee754/dbl-64/s_ceil.c: Likewise.
* sysdeps/ieee754/dbl-64/s_expm1.c: Likewise.
Modified: trunk/libc/math/math_private.h
==============================================================================
--- trunk/libc/math/math_private.h (original)
+++ trunk/libc/math/math_private.h Wed Oct 26 16:46:37 2011
@@ -354,8 +354,9 @@
#ifndef math_opt_barrier
#define math_opt_barrier(x) \
-({ __typeof (x) __x = x; __asm ("" : "+m" (__x)); __x; })
-#define math_force_eval(x) __asm __volatile ("" : : "m" (x))
+({ __typeof (x) __x = (x); __asm ("" : "+m" (__x)); __x; })
+#define math_force_eval(x) \
+({ __typeof (x) __x = (x); __asm __volatile ("" : : "m" (__x)); })
#endif
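
A note on the math_force_eval change above: GCC's "m" asm-input constraint
requires an addressable object, so the old macro could not accept an
expression such as huge + x. Binding the argument to a typed temporary first
makes any expression legal while still forcing its evaluation; the i386
variant below applies the same temporary to its "m" and x87 "f" constraint
cases. A minimal stand-alone sketch of the before/after behavior
(illustrative only, not the committed glibc code):

    /* Old form: the operand must be an lvalue in memory, so
       math_force_eval_old (a + b) is rejected or warned about by GCC.  */
    #define math_force_eval_old(x) __asm __volatile ("" : : "m" (x))

    /* New form: copy into a local first; the compiler must materialize
       the value, so the evaluation cannot be optimized away.  */
    #define math_force_eval_new(x) \
      ({ __typeof (x) __x = (x); __asm __volatile ("" : : "m" (__x)); })

    int main (void)
    {
      volatile double huge = 1.0e300, x = 1.0e-300;
      /* math_force_eval_old (huge + x);   <- not addressable */
      math_force_eval_new (huge + x);     /* forces the addition */
      return 0;
    }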
Modified: trunk/libc/sysdeps/i386/fpu/math_private.h
==============================================================================
--- trunk/libc/sysdeps/i386/fpu/math_private.h (original)
+++ trunk/libc/sysdeps/i386/fpu/math_private.h Wed Oct 26 16:46:37 2011
@@ -1,16 +1,17 @@
#ifndef _MATH_PRIVATE_H
#define math_opt_barrier(x) \
-({ __typeof(x) __x; \
+({ __typeof (x) __x; \
__asm ("" : "=t" (__x) : "0" (x)); \
__x; })
#define math_force_eval(x) \
do \
{ \
+ __typeof (x) __x = (x); \
if (sizeof (x) <= sizeof (double)) \
- __asm __volatile ("" : : "m" (x)); \
+ __asm __volatile ("" : : "m" (__x)); \
else \
- __asm __volatile ("" : : "f" (x)); \
+ __asm __volatile ("" : : "f" (__x)); \
} \
while (0)
Modified: trunk/libc/sysdeps/ieee754/dbl-64/e_rem_pio2.c
==============================================================================
--- trunk/libc/sysdeps/ieee754/dbl-64/e_rem_pio2.c (original)
+++ trunk/libc/sysdeps/ieee754/dbl-64/e_rem_pio2.c Wed Oct 26 16:46:37 2011
@@ -1,22 +1,19 @@
-/* @(#)e_rem_pio2.c 5.1 93/09/24 */
+#ifdef NOT_NEEDED_ANYMORE
+
/*
* ====================================================
* Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
*
* Developed at SunPro, a Sun Microsystems, Inc. business.
* Permission to use, copy, modify, and distribute this
- * software is freely granted, provided that this notice
+ * software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
-#if defined(LIBM_SCCS) && !defined(lint)
-static char rcsid[] = "$NetBSD: e_rem_pio2.c,v 1.8 1995/05/10 20:46:02 jtc Exp $";
-#endif
-
/* __ieee754_rem_pio2(x,y)
- *
- * return the remainder of x rem pi/2 in y[0]+y[1]
+ *
+ * return the remainder of x rem pi/2 in y[0]+y[1]
* use __kernel_rem_pio2()
*/
@@ -24,31 +21,23 @@
#include "math_private.h"
/*
- * Table of constants for 2/pi, 396 Hex digits (476 decimal) of 2/pi
+ * Table of constants for 2/pi, 396 Hex digits (476 decimal) of 2/pi
*/
-#ifdef __STDC__
static const int32_t two_over_pi[] = {
-#else
-static int32_t two_over_pi[] = {
-#endif
-0xA2F983, 0x6E4E44, 0x1529FC, 0x2757D1, 0xF534DD, 0xC0DB62,
-0x95993C, 0x439041, 0xFE5163, 0xABDEBB, 0xC561B7, 0x246E3A,
-0x424DD2, 0xE00649, 0x2EEA09, 0xD1921C, 0xFE1DEB, 0x1CB129,
-0xA73EE8, 0x8235F5, 0x2EBB44, 0x84E99C, 0x7026B4, 0x5F7E41,
-0x3991D6, 0x398353, 0x39F49C, 0x845F8B, 0xBDF928, 0x3B1FF8,
-0x97FFDE, 0x05980F, 0xEF2F11, 0x8B5A0A, 0x6D1F6D, 0x367ECF,
-0x27CB09, 0xB74F46, 0x3F669E, 0x5FEA2D, 0x7527BA, 0xC7EBE5,
-0xF17B3D, 0x0739F7, 0x8A5292, 0xEA6BFB, 0x5FB11F, 0x8D5D08,
-0x560330, 0x46FC7B, 0x6BABF0, 0xCFBC20, 0x9AF436, 0x1DA9E3,
-0x91615E, 0xE61B08, 0x659985, 0x5F14A0, 0x68408D, 0xFFD880,
-0x4D7327, 0x310606, 0x1556CA, 0x73A8C9, 0x60E27B, 0xC08C6B,
+0xA2F983, 0x6E4E44, 0x1529FC, 0x2757D1, 0xF534DD, 0xC0DB62,
+0x95993C, 0x439041, 0xFE5163, 0xABDEBB, 0xC561B7, 0x246E3A,
+0x424DD2, 0xE00649, 0x2EEA09, 0xD1921C, 0xFE1DEB, 0x1CB129,
+0xA73EE8, 0x8235F5, 0x2EBB44, 0x84E99C, 0x7026B4, 0x5F7E41,
+0x3991D6, 0x398353, 0x39F49C, 0x845F8B, 0xBDF928, 0x3B1FF8,
+0x97FFDE, 0x05980F, 0xEF2F11, 0x8B5A0A, 0x6D1F6D, 0x367ECF,
+0x27CB09, 0xB74F46, 0x3F669E, 0x5FEA2D, 0x7527BA, 0xC7EBE5,
+0xF17B3D, 0x0739F7, 0x8A5292, 0xEA6BFB, 0x5FB11F, 0x8D5D08,
+0x560330, 0x46FC7B, 0x6BABF0, 0xCFBC20, 0x9AF436, 0x1DA9E3,
+0x91615E, 0xE61B08, 0x659985, 0x5F14A0, 0x68408D, 0xFFD880,
+0x4D7327, 0x310606, 0x1556CA, 0x73A8C9, 0x60E27B, 0xC08C6B,
};
-#ifdef __STDC__
static const int32_t npio2_hw[] = {
-#else
-static int32_t npio2_hw[] = {
-#endif
0x3FF921FB, 0x400921FB, 0x4012D97C, 0x401921FB, 0x401F6A7A, 0x4022D97C,
0x4025FDBB, 0x402921FB, 0x402C463A, 0x402F6A7A, 0x4031475C, 0x4032D97C,
0x40346B9C, 0x4035FDBB, 0x40378FDB, 0x403921FB, 0x403AB41B, 0x403C463A,
@@ -67,11 +56,7 @@
* pio2_3t: pi/2 - (pio2_1+pio2_2+pio2_3)
*/
-#ifdef __STDC__
-static const double
-#else
-static double
-#endif
+static const double
zero = 0.00000000000000000000e+00, /* 0x00000000, 0x00000000 */
half = 5.00000000000000000000e-01, /* 0x3FE00000, 0x00000000 */
two24 = 1.67772160000000000000e+07, /* 0x41700000, 0x00000000 */
@@ -83,12 +68,8 @@
pio2_3 = 2.02226624871116645580e-21, /* 0x3BA3198A, 0x2E000000 */
pio2_3t = 8.47842766036889956997e-32; /* 0x397B839A, 0x252049C1 */
-#ifdef __STDC__
- int32_t __ieee754_rem_pio2(double x, double *y)
-#else
- int32_t __ieee754_rem_pio2(x,y)
- double x,y[];
-#endif
+int32_t
+__ieee754_rem_pio2(double x, double *y)
{
double z,w,t,r,fn;
double tx[3];
@@ -100,9 +81,9 @@
if(ix<=0x3fe921fb) /* |x| ~<= pi/4 , no need for reduction */
{y[0] = x; y[1] = 0; return 0;}
if(ix<0x4002d97c) { /* |x| < 3pi/4, special case with n=+-1 */
- if(hx>0) {
+ if(hx>0) {
z = x - pio2_1;
- if(ix!=0x3ff921fb) { /* 33+53 bit pi is good enough */
+ if(ix!=0x3ff921fb) { /* 33+53 bit pi is good enough */
y[0] = z - pio2_1t;
y[1] = (z-y[0])-pio2_1t;
} else { /* near pi/2, use 33+33+53 bit pi */
@@ -113,7 +94,7 @@
return 1;
} else { /* negative x */
z = x + pio2_1;
- if(ix!=0x3ff921fb) { /* 33+53 bit pi is good enough */
+ if(ix!=0x3ff921fb) { /* 33+53 bit pi is good enough */
y[0] = z + pio2_1t;
y[1] = (z-y[0])+pio2_1t;
} else { /* near pi/2, use 33+33+53 bit pi */
@@ -130,36 +111,36 @@
fn = (double)n;
r = t-fn*pio2_1;
w = fn*pio2_1t; /* 1st round good to 85 bit */
- if(n<32&&ix!=npio2_hw[n-1]) {
+ if(n<32&&ix!=npio2_hw[n-1]) {
y[0] = r-w; /* quick check no cancellation */
} else {
- u_int32_t high;
- j = ix>>20;
- y[0] = r-w;
+ u_int32_t high;
+ j = ix>>20;
+ y[0] = r-w;
GET_HIGH_WORD(high,y[0]);
- i = j-((high>>20)&0x7ff);
- if(i>16) { /* 2nd iteration needed, good to 118 */
+ i = j-((high>>20)&0x7ff);
+ if(i>16) { /* 2nd iteration needed, good to 118 */
t = r;
- w = fn*pio2_2;
+ w = fn*pio2_2;
r = t-w;
- w = fn*pio2_2t-((t-r)-w);
+ w = fn*pio2_2t-((t-r)-w);
y[0] = r-w;
GET_HIGH_WORD(high,y[0]);
i = j-((high>>20)&0x7ff);
if(i>49) { /* 3rd iteration need, 151 bits acc */
- t = r; /* will cover all possible cases */
- w = fn*pio2_3;
- r = t-w;
- w = fn*pio2_3t-((t-r)-w);
- y[0] = r-w;
+ t = r; /* will cover all possible cases */
+ w = fn*pio2_3;
+ r = t-w;
+ w = fn*pio2_3t-((t-r)-w);
+ y[0] = r-w;
}
}
}
y[1] = (r-y[0])-w;
- if(hx<0) {y[0] = -y[0]; y[1] = -y[1]; return -n;}
+ if(hx<0) {y[0] = -y[0]; y[1] = -y[1]; return -n;}
else return n;
}
- /*
+ /*
* all other (large) arguments
*/
if(ix>=0x7ff00000) { /* x is inf or NaN */
@@ -168,7 +149,7 @@
/* set z = scalbn(|x|,ilogb(x)-23) */
GET_LOW_WORD(low,x);
SET_LOW_WORD(z,low);
- e0 = (ix>>20)-1046; /* e0 = ilogb(z)-23; */
+ e0 = (ix>>20)-1046; /* e0 = ilogb(z)-23; */
SET_HIGH_WORD(z, ix - ((int32_t)(e0<<20)));
for(i=0;i<2;i++) {
tx[i] = (double)((int32_t)(z));
@@ -181,3 +162,5 @@
if(hx<0) {y[0] = -y[0]; y[1] = -y[1]; return -n;}
return n;
}
+
+#endif
Modified: trunk/libc/sysdeps/ieee754/dbl-64/s_round.c
==============================================================================
--- trunk/libc/sysdeps/ieee754/dbl-64/s_round.c (original)
+++ trunk/libc/sysdeps/ieee754/dbl-64/s_round.c Wed Oct 26 16:46:37 2011
@@ -38,7 +38,7 @@
{
if (j0 < 0)
{
- math_force_eval (huge + x > 0.0);
+ math_force_eval (huge + x);
i0 &= 0x80000000;
if (j0 == -1)
@@ -51,7 +51,7 @@
if (((i0 & i) | i1) == 0)
/* X is integral. */
return x;
- math_force_eval (huge + x > 0.0);
+ math_force_eval (huge + x);
/* Raise inexact if x != 0. */
i0 += 0x00080000 >> j0;
@@ -74,7 +74,7 @@
/* X is integral. */
return x;
- math_force_eval (huge + x > 0.0);
+ math_force_eval (huge + x);
/* Raise inexact if x != 0. */
u_int32_t j = i1 + (1 << (51 - j0));
Modified: trunk/libc/sysdeps/ieee754/flt-32/e_j0f.c
==============================================================================
--- trunk/libc/sysdeps/ieee754/flt-32/e_j0f.c (original)
+++ trunk/libc/sysdeps/ieee754/flt-32/e_j0f.c Wed Oct 26 16:46:37 2011
@@ -66,7 +66,7 @@
return z;
}
if(ix<0x39000000) { /* |x| < 2**-13 */
- math_force_eval(huge+x>one); /* raise inexact if x != 0 */
+ math_force_eval(huge+x); /* raise inexact if x != 0 */
if(ix<0x32000000) return one; /* |x|<2**-27 */
else return one - (float)0.25*x*x;
}
Modified: trunk/libc/sysdeps/ieee754/flt-32/s_roundf.c
==============================================================================
--- trunk/libc/sysdeps/ieee754/flt-32/s_roundf.c (original)
+++ trunk/libc/sysdeps/ieee754/flt-32/s_roundf.c Wed Oct 26 16:46:37 2011
@@ -37,7 +37,7 @@
{
if (j0 < 0)
{
- math_force_eval (huge + x > 0.0F);
+ math_force_eval (huge + x);
i0 &= 0x80000000;
if (j0 == -1)
@@ -49,7 +49,7 @@
if ((i0 & i) == 0)
/* X is integral. */
return x;
- math_force_eval (huge + x > 0.0F);
+ math_force_eval (huge + x);
/* Raise inexact if x != 0. */
i0 += 0x00400000 >> j0;
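
The three callers fixed above (s_round.c, e_j0f.c, s_roundf.c) invoke
math_force_eval (huge + x) purely for its side effect: when x is tiny and
nonzero, huge + x cannot be represented exactly, so the addition raises the
inexact exception, and the barrier keeps the otherwise-unused sum from being
optimized away. The dropped comparisons (> 0.0, > one) contributed nothing,
which is why only the addition remains. A small self-contained sketch of the
idiom (assumes the FP exception flags are observable this way on the target;
link with -lm):

    #include <fenv.h>
    #include <stdio.h>

    int main (void)
    {
      volatile double huge = 1.0e300, x = 1.0e-30;

      feclearexcept (FE_INEXACT);
      volatile double sum = huge + x;  /* volatile stands in for the barrier */
      (void) sum;
      printf ("inexact raised: %s\n",
              fetestexcept (FE_INEXACT) ? "yes" : "no");
      return 0;
    }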
Modified: trunk/libc/sysdeps/x86_64/fpu/multiarch/Makefile
==============================================================================
--- trunk/libc/sysdeps/x86_64/fpu/multiarch/Makefile (original)
+++ trunk/libc/sysdeps/x86_64/fpu/multiarch/Makefile Wed Oct 26 16:46:37 2011
@@ -30,7 +30,36 @@
CFLAGS-sincos32-fma4.c = -mfma4
CFLAGS-slowexp-fma4.c = -mfma4
CFLAGS-slowpow-fma4.c = -mfma4
-CLFAGS-s_sin-fma4.c = -mfma4
-CLFAGS-s_tan-fma4.c = -mfma4
+CFLAGS-s_sin-fma4.c = -mfma4
+CFLAGS-s_tan-fma4.c = -mfma4
+endif
+
+ifeq ($(config-cflags-avx),yes)
+libm-sysdep_routines += e_exp-avx e_log-avx s_atan-avx \
+ e_asin-avx e_atan2-avx s_sin-avx s_tan-avx \
+ mplog-avx mpa-avx slowexp-avx \
+ sincos32-avx doasin-avx dosincos-avx \
+ brandred-avx mpexp-avx \
+ mpatan2-avx mpatan-avx mpsqrt-avx mptan-avx
+
+CFLAGS-brandred-avx.c = -mavx
+CFLAGS-doasin-avx.c = -mavx
+CFLAGS-dosincos-avx.c = -mavx
+CFLAGS-e_asin-avx.c = -mavx
+CFLAGS-e_atan2-avx.c = -mavx
+CFLAGS-e_exp-avx.c = -mavx
+CFLAGS-e_log-avx.c = -mavx
+CFLAGS-mpa-avx.c = -mavx
+CFLAGS-mpatan-avx.c = -mavx
+CFLAGS-mpatan2-avx.c = -mavx
+CFLAGS-mpexp-avx.c = -mavx
+CFLAGS-mplog-avx.c = -mavx
+CFLAGS-mpsqrt-avx.c = -mavx
+CFLAGS-mptan-avx.c = -mavx
+CFLAGS-s_atan-avx.c = -mavx
+CFLAGS-s_sin-avx.c = -mavx
+CFLAGS-sincos32-avx.c = -mavx
+CFLAGS-slowexp-avx.c = -mavx
+CFLAGS-s_tan-avx.c = -mavx
endif
endif
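
Note on the Makefile fix: the two misspelled CLFAGS-s_sin-fma4.c and
CLFAGS-s_tan-fma4.c assignments were silently accepted by make as definitions
of unrelated, never-read variables, so those two FMA4 objects had been built
without -mfma4. The build machinery appends the per-file variable
CFLAGS-<source> only when compiling exactly that file, which is also why the
new AVX block pairs every added object with its own CFLAGS-*-avx.c = -mavx
line.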
Modified: trunk/libc/sysdeps/x86_64/fpu/multiarch/e_asin.c
==============================================================================
--- trunk/libc/sysdeps/x86_64/fpu/multiarch/e_asin.c (original)
+++ trunk/libc/sysdeps/x86_64/fpu/multiarch/e_asin.c Wed Oct 26 16:46:37 2011
@@ -1,18 +1,29 @@
-#ifdef HAVE_FMA4_SUPPORT
+#if defined HAVE_FMA4_SUPPORT || defined HAVE_AVX_SUPPORT
# include <init-arch.h>
# include <math_private.h>
extern double __ieee754_acos_sse2 (double);
+extern double __ieee754_asin_sse2 (double);
+extern double __ieee754_acos_avx (double);
+extern double __ieee754_asin_avx (double);
+# ifdef HAVE_FMA4_SUPPORT
extern double __ieee754_acos_fma4 (double);
-extern double __ieee754_asin_sse2 (double);
extern double __ieee754_asin_fma4 (double);
+# else
+# undef HAS_FMA4
+# define HAS_FMA4 0
+# define __ieee754_acos_fma4 ((void *) 0)
+# define __ieee754_asin_fma4 ((void *) 0)
+# endif
libm_ifunc (__ieee754_acos,
- HAS_FMA4 ? __ieee754_acos_fma4 : __ieee754_acos_sse2);
+ HAS_FMA4 ? __ieee754_acos_fma4
+ : (HAS_AVX ? __ieee754_acos_avx : __ieee754_acos_sse2));
strong_alias (__ieee754_acos, __acos_finite)
libm_ifunc (__ieee754_asin,
- HAS_FMA4 ? __ieee754_asin_fma4 : __ieee754_asin_sse2);
+ HAS_FMA4 ? __ieee754_asin_fma4
+ : (HAS_AVX ? __ieee754_asin_avx : __ieee754_asin_sse2));
strong_alias (__ieee754_asin, __asin_finite)
# define __ieee754_acos __ieee754_acos_sse2
Modified: trunk/libc/sysdeps/x86_64/fpu/multiarch/e_atan2.c
==============================================================================
--- trunk/libc/sysdeps/x86_64/fpu/multiarch/e_atan2.c (original)
+++ trunk/libc/sysdeps/x86_64/fpu/multiarch/e_atan2.c Wed Oct 26 16:46:37 2011
@@ -1,12 +1,20 @@
-#ifdef HAVE_FMA4_SUPPORT
+#if defined HAVE_FMA4_SUPPORT || defined HAVE_AVX_SUPPORT
# include <init-arch.h>
# include <math_private.h>
extern double __ieee754_atan2_sse2 (double, double);
+extern double __ieee754_atan2_avx (double, double);
+# ifdef HAVE_FMA4_SUPPORT
extern double __ieee754_atan2_fma4 (double, double);
+# else
+# undef HAS_FMA4
+# define HAS_FMA4 0
+# define __ieee754_atan2_fma4 ((void *) 0)
+# endif
libm_ifunc (__ieee754_atan2,
- HAS_FMA4 ? __ieee754_atan2_fma4 : __ieee754_atan2_sse2);
+ HAS_FMA4 ? __ieee754_atan2_fma4
+ : (HAS_AVX ? __ieee754_atan2_avx : __ieee754_atan2_sse2));
strong_alias (__ieee754_atan2, __atan2_finite)
# define __ieee754_atan2 __ieee754_atan2_sse2
Modified: trunk/libc/sysdeps/x86_64/fpu/multiarch/e_exp.c
==============================================================================
--- trunk/libc/sysdeps/x86_64/fpu/multiarch/e_exp.c (original)
+++ trunk/libc/sysdeps/x86_64/fpu/multiarch/e_exp.c Wed Oct 26 16:46:37 2011
@@ -1,11 +1,20 @@
-#ifdef HAVE_FMA4_SUPPORT
+#if defined HAVE_FMA4_SUPPORT || defined HAVE_AVX_SUPPORT
# include <init-arch.h>
# include <math_private.h>
extern double __ieee754_exp_sse2 (double);
+extern double __ieee754_exp_avx (double);
+# ifdef HAVE_FMA4_SUPPORT
extern double __ieee754_exp_fma4 (double);
+# else
+# undef HAS_FMA4
+# define HAS_FMA4 0
+# define __ieee754_exp_fma4 ((void *) 0)
+# endif
-libm_ifunc (__ieee754_exp, HAS_FMA4 ? __ieee754_exp_fma4 : __ieee754_exp_sse2);
+libm_ifunc (__ieee754_exp,
+ HAS_FMA4 ? __ieee754_exp_fma4
+ : (HAS_AVX ? __ieee754_exp_avx : __ieee754_exp_sse2));
strong_alias (__ieee754_exp, __exp_finite)
# define __ieee754_exp __ieee754_exp_sse2
Modified: trunk/libc/sysdeps/x86_64/fpu/multiarch/e_log.c
==============================================================================
--- trunk/libc/sysdeps/x86_64/fpu/multiarch/e_log.c (original)
+++ trunk/libc/sysdeps/x86_64/fpu/multiarch/e_log.c Wed Oct 26 16:46:37 2011
@@ -1,11 +1,21 @@
-#ifdef HAVE_FMA4_SUPPORT
+#if defined HAVE_FMA4_SUPPORT || defined HAVE_AVX_SUPPORT
# include <init-arch.h>
# include <math_private.h>
extern double __ieee754_log_sse2 (double);
+extern double __ieee754_log_avx (double);
+# ifdef HAVE_FMA4_SUPPORT
extern double __ieee754_log_fma4 (double);
+# else
+# undef HAS_FMA4
+# define HAS_FMA4 0
+# define __ieee754_log_fma4 ((void *) 0)
+# endif
-libm_ifunc (__ieee754_log, HAS_FMA4 ? __ieee754_log_fma4 : __ieee754_log_sse2);
+libm_ifunc (__ieee754_log,
+ HAS_FMA4 ? __ieee754_log_fma4
+ : (HAS_AVX ? __ieee754_log_avx
+ : __ieee754_log_sse2));
strong_alias (__ieee754_log, __log_finite)
# define __ieee754_log __ieee754_log_sse2
Modified: trunk/libc/sysdeps/x86_64/fpu/multiarch/s_atan.c
==============================================================================
--- trunk/libc/sysdeps/x86_64/fpu/multiarch/s_atan.c (original)
+++ trunk/libc/sysdeps/x86_64/fpu/multiarch/s_atan.c Wed Oct 26 16:46:37 2011
@@ -1,11 +1,18 @@
-#ifdef HAVE_FMA4_SUPPORT
+#if defined HAVE_FMA4_SUPPORT || defined HAVE_AVX_SUPPORT
# include <init-arch.h>
# include <math.h>
extern double __atan_sse2 (double);
+extern double __atan_avx (double);
+# ifdef HAVE_FMA4_SUPPORT
extern double __atan_fma4 (double);
+# else
+# undef HAS_FMA4
+# define HAS_FMA4 0
+# define __atan_fma4 ((void *) 0)
+# endif
-libm_ifunc (atan, HAS_FMA4 ? __atan_fma4 : __atan_sse2);
+libm_ifunc (atan, HAS_FMA4 ? __atan_fma4 : HAS_AVX ? __atan_avx : __atan_sse2);
# define atan __atan_sse2
#endif
Modified: trunk/libc/sysdeps/x86_64/fpu/multiarch/s_sin.c
==============================================================================
--- trunk/libc/sysdeps/x86_64/fpu/multiarch/s_sin.c (original)
+++ trunk/libc/sysdeps/x86_64/fpu/multiarch/s_sin.c Wed Oct 26 16:46:37 2011
@@ -1,17 +1,26 @@
-#ifdef HAVE_FMA4_SUPPORT
+#if defined HAVE_FMA4_SUPPORT || defined HAVE_AVX_SUPPORT
# include <init-arch.h>
# include <math.h>
# undef NAN
extern double __cos_sse2 (double);
+extern double __sin_sse2 (double);
+extern double __cos_avx (double);
+extern double __sin_avx (double);
+# ifdef HAVE_FMA4_SUPPORT
extern double __cos_fma4 (double);
-extern double __sin_sse2 (double);
extern double __sin_fma4 (double);
+# else
+# undef HAS_FMA4
+# define HAS_FMA4 0
+# define __cos_fma4 ((void *) 0)
+# define __sin_fma4 ((void *) 0)
+# endif
-libm_ifunc (__cos, HAS_FMA4 ? __cos_fma4 : __cos_sse2);
+libm_ifunc (__cos, HAS_FMA4 ? __cos_fma4 : HAS_AVX ? __cos_avx : __cos_sse2);
weak_alias (__cos, cos)
-libm_ifunc (__sin, HAS_FMA4 ? __sin_fma4 : __sin_sse2);
+libm_ifunc (__sin, HAS_FMA4 ? __sin_fma4 : HAS_AVX ? __sin_avx : __sin_sse2);
weak_alias (__sin, sin)
# define __cos __cos_sse2
Modified: trunk/libc/sysdeps/x86_64/fpu/multiarch/s_tan.c
==============================================================================
--- trunk/libc/sysdeps/x86_64/fpu/multiarch/s_tan.c (original)
+++ trunk/libc/sysdeps/x86_64/fpu/multiarch/s_tan.c Wed Oct 26 16:46:37 2011
@@ -1,11 +1,18 @@
-#ifdef HAVE_FMA4_SUPPORT
+#if defined HAVE_FMA4_SUPPORT || defined HAVE_AVX_SUPPORT
# include <init-arch.h>
# include <math.h>
extern double __tan_sse2 (double);
+extern double __tan_avx (double);
+# ifdef HAVE_FMA4_SUPPORT
extern double __tan_fma4 (double);
+# else
+# undef HAS_FMA4
+# define HAS_FMA4 0
+# define __tan_fma4 ((void *) 0)
+# endif
-libm_ifunc (tan, HAS_FMA4 ? __tan_fma4 : __tan_sse2);
+libm_ifunc (tan, HAS_FMA4 ? __tan_fma4 : HAS_AVX ? __tan_avx : __tan_sse2);
# define tan __tan_sse2
#endif
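
The seven wrapper files above (e_asin.c through s_tan.c) all follow the same
dispatch pattern: the exported symbol becomes a GNU indirect function whose
resolver runs once, at symbol-binding time, and selects the FMA4, AVX, or
SSE2 implementation from the HAS_* tests in init-arch.h; when FMA4 support is
not compiled in, HAS_FMA4 is redefined to 0 so the same ternary chain still
builds. A generic, self-contained sketch of the mechanism (plain GCC ifunc
attribute on ELF; the function names and feature stubs are placeholders, not
glibc's libm_ifunc internals):

    #include <stdio.h>

    static double my_exp_sse2 (double x) { return x + 1.0; } /* placeholder */
    static double my_exp_avx  (double x) { return x + 2.0; } /* placeholder */
    static double my_exp_fma4 (double x) { return x + 3.0; } /* placeholder */

    /* Stand-ins for init-arch.h's HAS_FMA4 / HAS_AVX CPUID tests.  */
    static int has_fma4 (void) { return 0; }
    static int has_avx (void)  { return 1; }

    /* Runs when my_exp is first bound; every later call jumps straight
       to whichever implementation it returned.  */
    static double (*resolve_my_exp (void)) (double)
    {
      return has_fma4 () ? my_exp_fma4
             : (has_avx () ? my_exp_avx : my_exp_sse2);
    }

    double my_exp (double) __attribute__ ((ifunc ("resolve_my_exp")));

    int main (void)
    {
      printf ("%g\n", my_exp (1.0));   /* prints 3 with the stubs above */
      return 0;
    }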
Modified: trunk/libc/sysdeps/x86_64/multiarch/init-arch.h
==============================================================================
--- trunk/libc/sysdeps/x86_64/multiarch/init-arch.h (original)
+++ trunk/libc/sysdeps/x86_64/multiarch/init-arch.h Wed Oct 26 16:46:37 2011
@@ -23,15 +23,18 @@
#define bit_Fast_Unaligned_Load (1 << 4)
#define bit_Prefer_PMINUB_for_stringop (1 << 5)
+#define bit_SSE2 (1 << 26)
+#define bit_SSSE3 (1 << 9)
+#define bit_SSE4_1 (1 << 19)
+#define bit_SSE4_2 (1 << 20)
+#define bit_AVX (1 << 28)
+#define bit_POPCOUNT (1 << 23)
+#define bit_FMA (1 << 12)
+#define bit_FMA4 (1 << 16)
+
#ifdef __ASSEMBLER__
# include <ifunc-defines.h>
-
-# define bit_SSE2 (1 << 26)
-# define bit_SSSE3 (1 << 9)
-# define bit_SSE4_1 (1 << 19)
-# define bit_SSE4_2 (1 << 20)
-# define bit_AVX (1 << 28)
# define index_SSE2 COMMON_CPUID_INDEX_1*CPUID_SIZE+CPUID_EDX_OFFSET
# define index_SSSE3 COMMON_CPUID_INDEX_1*CPUID_SIZE+CPUID_ECX_OFFSET
@@ -104,17 +107,18 @@
# endif
# define HAS_CPU_FEATURE(idx, reg, bit) \
- ((__get_cpu_features ()->cpuid[idx].reg & (1 << (bit))) != 0)
+ ((__get_cpu_features ()->cpuid[idx].reg & (bit)) != 0)
/* Following are the feature tests used throughout libc. */
-# define HAS_SSE2 HAS_CPU_FEATURE (COMMON_CPUID_INDEX_1, edx, 26)
-# define HAS_POPCOUNT HAS_CPU_FEATURE (COMMON_CPUID_INDEX_1, ecx, 23)
-# define HAS_SSSE3 HAS_CPU_FEATURE (COMMON_CPUID_INDEX_1, ecx, 9)
-# define HAS_SSE4_1 HAS_CPU_FEATURE (COMMON_CPUID_INDEX_1, ecx, 19)
-# define HAS_SSE4_2 HAS_CPU_FEATURE (COMMON_CPUID_INDEX_1, ecx, 20)
-# define HAS_FMA HAS_CPU_FEATURE (COMMON_CPUID_INDEX_1, ecx, 12)
-# define HAS_FMA4 HAS_CPU_FEATURE (COMMON_CPUID_INDEX_80000001, ecx, 16)
+# define HAS_SSE2 HAS_CPU_FEATURE (COMMON_CPUID_INDEX_1, edx, bit_SSE2)
+# define HAS_POPCOUNT HAS_CPU_FEATURE (COMMON_CPUID_INDEX_1, ecx, bit_POPCOUNT)
+# define HAS_SSSE3 HAS_CPU_FEATURE (COMMON_CPUID_INDEX_1, ecx, bit_SSSE3)
+# define HAS_SSE4_1 HAS_CPU_FEATURE (COMMON_CPUID_INDEX_1, ecx, bit_SSE4_1)
+# define HAS_SSE4_2 HAS_CPU_FEATURE (COMMON_CPUID_INDEX_1, ecx, bit_SSE4_2)
+# define HAS_FMA HAS_CPU_FEATURE (COMMON_CPUID_INDEX_1, ecx, bit_FMA)
+# define HAS_AVX HAS_CPU_FEATURE (COMMON_CPUID_INDEX_1, ecx, bit_AVX)
+# define HAS_FMA4 HAS_CPU_FEATURE (COMMON_CPUID_INDEX_80000001, ecx, bit_FMA4)
# define index_Fast_Rep_String FEATURE_INDEX_1
# define index_Fast_Copy_Backward FEATURE_INDEX_1
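
Two things change in init-arch.h: the bit_* masks (including the new bit_AVX)
are now defined unconditionally so C and assembler code share them, and
HAS_CPU_FEATURE now takes a ready-made mask rather than a bit index, letting
the HAS_* macros be written with the same bit_* names. The values are the
standard CPUID feature bits: leaf 1 EDX/ECX for SSE2 through AVX, and leaf
0x80000001 ECX for FMA4. A stand-alone illustration using GCC's <cpuid.h>,
which happens to provide bit_SSE2 and bit_AVX with these same values (a
sketch, not glibc code; a complete AVX check would also verify OSXSAVE/XGETBV
state support):

    #include <cpuid.h>
    #include <stdio.h>

    int main (void)
    {
      unsigned int eax, ebx, ecx, edx;

      /* Leaf 1: feature flags land in EDX (e.g. SSE2) and ECX (e.g. AVX). */
      if (!__get_cpuid (1, &eax, &ebx, &ecx, &edx))
        return 1;

      printf ("SSE2: %s\n", (edx & bit_SSE2) ? "yes" : "no");
      printf ("AVX:  %s\n", (ecx & bit_AVX) ? "yes" : "no");
      return 0;
    }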
Modified: trunk/libc/sysdeps/x86_64/multiarch/strcmp-sse42.S
==============================================================================
--- trunk/libc/sysdeps/x86_64/multiarch/strcmp-sse42.S (original)
+++ trunk/libc/sysdeps/x86_64/multiarch/strcmp-sse42.S Wed Oct 26 16:46:37 2011
@@ -70,6 +70,23 @@
.byte 0x0f,0x1f,0x44,0x00,0x00
END (GLABEL(__strncasecmp))
/* FALLTHROUGH to strncasecmp_l. */
+#endif
+
+
+#ifdef USE_AVX
+# define movdqa vmovdqa
+# define movdqu vmovdqu
+# define pmovmskb vpmovmskb
+# define pcmpistri vpcmpistri
+# define psubb vpsubb
+# define pcmpeqb vpcmpeqb
+# define psrldq vpsrldq
+# define pslldq vpslldq
+# define palignr vpalignr
+# define pxor vpxor
+# define D(arg) arg, arg
+#else
+# define D(arg) arg
#endif
STRCMP_SSE42:
@@ -179,10 +196,10 @@
#else
# define TOLOWER(reg1, reg2)
#endif
- pxor %xmm0, %xmm0 /* clear %xmm0 for null char checks */
- pcmpeqb %xmm1, %xmm0 /* Any null chars? */
- pcmpeqb %xmm2, %xmm1 /* compare first 16 bytes for equality */
- psubb %xmm0, %xmm1 /* packed sub of comparison results*/
+ pxor %xmm0, D(%xmm0) /* clear %xmm0 for null char checks */
+ pcmpeqb %xmm1, D(%xmm0) /* Any null chars? */
+ pcmpeqb %xmm2, D(%xmm1) /* compare first 16 bytes for equality */
+ psubb %xmm0, D(%xmm1) /* packed sub of comparison results*/
pmovmskb %xmm1, %edx
sub $0xffff, %edx /* if first 16 bytes are same, edx == 0xffff */
jnz LABEL(less16bytes)/* If not, find different value or null char */
@@ -206,6 +223,7 @@
xor %r8d, %r8d
and $0xf, %ecx /* offset of rsi */
and $0xf, %eax /* offset of rdi */
+ pxor %xmm0, D(%xmm0) /* clear %xmm0 for null char check */
cmp %eax, %ecx
je LABEL(ashr_0) /* rsi and rdi relative offset same */
ja LABEL(bigger)
@@ -213,10 +231,13 @@
xchg %ecx, %eax
xchg %rsi, %rdi
LABEL(bigger):
+ movdqa (%rdi), %xmm2
+ movdqa (%rsi), %xmm1
lea 15(%rax), %r9
sub %rcx, %r9
lea LABEL(unaligned_table)(%rip), %r10
movslq (%r10, %r9,4), %r9
+ pcmpeqb %xmm1, D(%xmm0) /* Any null chars? */
lea (%r10, %r9), %r10
jmp *%r10 /* jump to corresponding case */
@@ -229,16 +250,15 @@
LABEL(ashr_0):
movdqa (%rsi), %xmm1
- pxor %xmm0, %xmm0 /* clear %xmm0 for null char check */
- pcmpeqb %xmm1, %xmm0 /* Any null chars? */
-#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
- pcmpeqb (%rdi), %xmm1 /* compare 16 bytes for equality */
+ pcmpeqb %xmm1, D(%xmm0) /* Any null chars? */
+#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
+ pcmpeqb (%rdi), D(%xmm1) /* compare 16 bytes for equality */
#else
movdqa (%rdi), %xmm2
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm2, %xmm1 /* compare 16 bytes for equality */
-#endif
- psubb %xmm0, %xmm1 /* packed sub of comparison results*/
+ pcmpeqb %xmm2, D(%xmm1) /* compare 16 bytes for equality */
+#endif
+ psubb %xmm0, D(%xmm1) /* packed sub of comparison results*/
pmovmskb %xmm1, %r9d
shr %cl, %edx /* adjust 0xffff for offset */
shr %cl, %r9d /* adjust for 16-byte offset */
@@ -251,7 +271,6 @@
UPDATE_STRNCMP_COUNTER
mov $16, %rcx
mov $16, %r9
- pxor %xmm0, %xmm0 /* clear xmm0, may have changed above */
/*
* Now both strings are aligned at 16-byte boundary. Loop over strings
@@ -319,14 +338,10 @@
*/
.p2align 4
LABEL(ashr_1):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0 /* Any null chars? */
- pslldq $15, %xmm2 /* shift first string to align with second */
+ pslldq $15, D(%xmm2) /* shift first string to align with second */
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, %xmm2 /* compare 16 bytes for equality */
- psubb %xmm0, %xmm2 /* packed sub of comparison results*/
+ pcmpeqb %xmm1, D(%xmm2) /* compare 16 bytes for equality */
+ psubb %xmm0, D(%xmm2) /* packed sub of comparison results*/
pmovmskb %xmm2, %r9d
shr %cl, %edx /* adjust 0xffff for offset */
shr %cl, %r9d /* adjust for 16-byte offset */
@@ -335,7 +350,6 @@
movdqa (%rdi), %xmm3
UPDATE_STRNCMP_COUNTER
- pxor %xmm0, %xmm0
mov $16, %rcx /* index for loads*/
mov $1, %r9d /* byte position left over from less32bytes case */
/*
@@ -355,7 +369,7 @@
LABEL(nibble_ashr_1_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $1, -16(%rdi, %rdx), %xmm0
+ palignr $1, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -374,7 +388,7 @@
jg LABEL(nibble_ashr_1_use)
movdqa (%rdi, %rdx), %xmm0
- palignr $1, -16(%rdi, %rdx), %xmm0
+ palignr $1, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -394,7 +408,7 @@
LABEL(nibble_ashr_1_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $1, %xmm0
+ psrldq $1, D(%xmm0)
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -412,14 +426,10 @@
*/
.p2align 4
LABEL(ashr_2):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $14, %xmm2
+ pslldq $14, D(%xmm2)
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
+ pcmpeqb %xmm1, D(%xmm2)
+ psubb %xmm0, D(%xmm2)
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -428,7 +438,6 @@
movdqa (%rdi), %xmm3
UPDATE_STRNCMP_COUNTER
- pxor %xmm0, %xmm0
mov $16, %rcx /* index for loads */
mov $2, %r9d /* byte position left over from less32bytes case */
/*
@@ -448,7 +457,7 @@
LABEL(nibble_ashr_2_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $2, -16(%rdi, %rdx), %xmm0
+ palignr $2, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -467,7 +476,7 @@
jg LABEL(nibble_ashr_2_use)
movdqa (%rdi, %rdx), %xmm0
- palignr $2, -16(%rdi, %rdx), %xmm0
+ palignr $2, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -487,7 +496,7 @@
LABEL(nibble_ashr_2_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $2, %xmm0
+ psrldq $2, D(%xmm0)
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -505,14 +514,10 @@
*/
.p2align 4
LABEL(ashr_3):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $13, %xmm2
+ pslldq $13, D(%xmm2)
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
+ pcmpeqb %xmm1, D(%xmm2)
+ psubb %xmm0, D(%xmm2)
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -522,7 +527,6 @@
UPDATE_STRNCMP_COUNTER
- pxor %xmm0, %xmm0
mov $16, %rcx /* index for loads */
mov $3, %r9d /* byte position left over from less32bytes case */
/*
@@ -541,7 +545,7 @@
LABEL(nibble_ashr_3_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $3, -16(%rdi, %rdx), %xmm0
+ palignr $3, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -560,7 +564,7 @@
jg LABEL(nibble_ashr_3_use)
movdqa (%rdi, %rdx), %xmm0
- palignr $3, -16(%rdi, %rdx), %xmm0
+ palignr $3, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -580,7 +584,7 @@
LABEL(nibble_ashr_3_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $3, %xmm0
+ psrldq $3, D(%xmm0)
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -598,14 +602,10 @@
*/
.p2align 4
LABEL(ashr_4):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $12, %xmm2
+ pslldq $12, D(%xmm2)
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
+ pcmpeqb %xmm1, D(%xmm2)
+ psubb %xmm0, D(%xmm2)
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -615,7 +615,6 @@
UPDATE_STRNCMP_COUNTER
- pxor %xmm0, %xmm0
mov $16, %rcx /* index for loads */
mov $4, %r9d /* byte position left over from less32bytes case */
/*
@@ -635,7 +634,7 @@
LABEL(nibble_ashr_4_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $4, -16(%rdi, %rdx), %xmm0
+ palignr $4, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -654,7 +653,7 @@
jg LABEL(nibble_ashr_4_use)
movdqa (%rdi, %rdx), %xmm0
- palignr $4, -16(%rdi, %rdx), %xmm0
+ palignr $4, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -674,7 +673,7 @@
LABEL(nibble_ashr_4_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $4, %xmm0
+ psrldq $4, D(%xmm0)
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -692,14 +691,10 @@
*/
.p2align 4
LABEL(ashr_5):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $11, %xmm2
+ pslldq $11, D(%xmm2)
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
+ pcmpeqb %xmm1, D(%xmm2)
+ psubb %xmm0, D(%xmm2)
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -709,7 +704,6 @@
UPDATE_STRNCMP_COUNTER
- pxor %xmm0, %xmm0
mov $16, %rcx /* index for loads */
mov $5, %r9d /* byte position left over from less32bytes case */
/*
@@ -729,7 +723,7 @@
LABEL(nibble_ashr_5_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $5, -16(%rdi, %rdx), %xmm0
+ palignr $5, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -749,7 +743,7 @@
movdqa (%rdi, %rdx), %xmm0
- palignr $5, -16(%rdi, %rdx), %xmm0
+ palignr $5, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -769,7 +763,7 @@
LABEL(nibble_ashr_5_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $5, %xmm0
+ psrldq $5, D(%xmm0)
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -787,14 +781,10 @@
*/
.p2align 4
LABEL(ashr_6):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $10, %xmm2
+ pslldq $10, D(%xmm2)
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
+ pcmpeqb %xmm1, D(%xmm2)
+ psubb %xmm0, D(%xmm2)
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -804,7 +794,6 @@
UPDATE_STRNCMP_COUNTER
- pxor %xmm0, %xmm0
mov $16, %rcx /* index for loads */
mov $6, %r9d /* byte position left over from less32bytes case */
/*
@@ -824,7 +813,7 @@
LABEL(nibble_ashr_6_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $6, -16(%rdi, %rdx), %xmm0
+ palignr $6, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -843,7 +832,7 @@
jg LABEL(nibble_ashr_6_use)
movdqa (%rdi, %rdx), %xmm0
- palignr $6, -16(%rdi, %rdx), %xmm0
+ palignr $6, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -863,7 +852,7 @@
LABEL(nibble_ashr_6_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $6, %xmm0
+ psrldq $6, D(%xmm0)
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -881,14 +870,10 @@
*/
.p2align 4
LABEL(ashr_7):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $9, %xmm2
+ pslldq $9, D(%xmm2)
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
+ pcmpeqb %xmm1, D(%xmm2)
+ psubb %xmm0, D(%xmm2)
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -898,7 +883,6 @@
UPDATE_STRNCMP_COUNTER
- pxor %xmm0, %xmm0
mov $16, %rcx /* index for loads */
mov $7, %r9d /* byte position left over from less32bytes case */
/*
@@ -918,7 +902,7 @@
LABEL(nibble_ashr_7_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $7, -16(%rdi, %rdx), %xmm0
+ palignr $7, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -937,7 +921,7 @@
jg LABEL(nibble_ashr_7_use)
movdqa (%rdi, %rdx), %xmm0
- palignr $7, -16(%rdi, %rdx), %xmm0
+ palignr $7, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
#else
@@ -957,7 +941,7 @@
LABEL(nibble_ashr_7_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $7, %xmm0
+ psrldq $7, D(%xmm0)
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -975,14 +959,10 @@
*/
.p2align 4
LABEL(ashr_8):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $8, %xmm2
+ pslldq $8, D(%xmm2)
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
+ pcmpeqb %xmm1, D(%xmm2)
+ psubb %xmm0, D(%xmm2)
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -992,7 +972,6 @@
UPDATE_STRNCMP_COUNTER
- pxor %xmm0, %xmm0
mov $16, %rcx /* index for loads */
mov $8, %r9d /* byte position left over from less32bytes case */
/*
@@ -1012,7 +991,7 @@
LABEL(nibble_ashr_8_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $8, -16(%rdi, %rdx), %xmm0
+ palignr $8, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1031,7 +1010,7 @@
jg LABEL(nibble_ashr_8_use)
movdqa (%rdi, %rdx), %xmm0
- palignr $8, -16(%rdi, %rdx), %xmm0
+ palignr $8, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1051,7 +1030,7 @@
LABEL(nibble_ashr_8_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $8, %xmm0
+ psrldq $8, D(%xmm0)
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -1069,14 +1048,10 @@
*/
.p2align 4
LABEL(ashr_9):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $7, %xmm2
+ pslldq $7, D(%xmm2)
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
+ pcmpeqb %xmm1, D(%xmm2)
+ psubb %xmm0, D(%xmm2)
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -1086,7 +1061,6 @@
UPDATE_STRNCMP_COUNTER
- pxor %xmm0, %xmm0
mov $16, %rcx /* index for loads */
mov $9, %r9d /* byte position left over from less32bytes case */
/*
@@ -1107,7 +1081,7 @@
LABEL(nibble_ashr_9_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $9, -16(%rdi, %rdx), %xmm0
+ palignr $9, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1126,7 +1100,7 @@
jg LABEL(nibble_ashr_9_use)
movdqa (%rdi, %rdx), %xmm0
- palignr $9, -16(%rdi, %rdx), %xmm0
+ palignr $9, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1146,7 +1120,7 @@
LABEL(nibble_ashr_9_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $9, %xmm0
+ psrldq $9, D(%xmm0)
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -1164,14 +1138,10 @@
*/
.p2align 4
LABEL(ashr_10):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $6, %xmm2
+ pslldq $6, D(%xmm2)
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
+ pcmpeqb %xmm1, D(%xmm2)
+ psubb %xmm0, D(%xmm2)
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -1181,7 +1151,6 @@
UPDATE_STRNCMP_COUNTER
- pxor %xmm0, %xmm0
mov $16, %rcx /* index for loads */
mov $10, %r9d /* byte position left over from less32bytes case */
/*
@@ -1201,7 +1170,7 @@
LABEL(nibble_ashr_10_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $10, -16(%rdi, %rdx), %xmm0
+ palignr $10, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1220,7 +1189,7 @@
jg LABEL(nibble_ashr_10_use)
movdqa (%rdi, %rdx), %xmm0
- palignr $10, -16(%rdi, %rdx), %xmm0
+ palignr $10, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1240,7 +1209,7 @@
LABEL(nibble_ashr_10_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $10, %xmm0
+ psrldq $10, D(%xmm0)
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -1258,14 +1227,10 @@
*/
.p2align 4
LABEL(ashr_11):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $5, %xmm2
+ pslldq $5, D(%xmm2)
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
+ pcmpeqb %xmm1, D(%xmm2)
+ psubb %xmm0, D(%xmm2)
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -1275,7 +1240,6 @@
UPDATE_STRNCMP_COUNTER
- pxor %xmm0, %xmm0
mov $16, %rcx /* index for loads */
mov $11, %r9d /* byte position left over from less32bytes case */
/*
@@ -1295,7 +1259,7 @@
LABEL(nibble_ashr_11_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $11, -16(%rdi, %rdx), %xmm0
+ palignr $11, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1314,7 +1278,7 @@
jg LABEL(nibble_ashr_11_use)
movdqa (%rdi, %rdx), %xmm0
- palignr $11, -16(%rdi, %rdx), %xmm0
+ palignr $11, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1334,7 +1298,7 @@
LABEL(nibble_ashr_11_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $11, %xmm0
+ psrldq $11, D(%xmm0)
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -1352,14 +1316,10 @@
*/
.p2align 4
LABEL(ashr_12):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $4, %xmm2
+ pslldq $4, D(%xmm2)
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
+ pcmpeqb %xmm1, D(%xmm2)
+ psubb %xmm0, D(%xmm2)
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -1369,7 +1329,6 @@
UPDATE_STRNCMP_COUNTER
- pxor %xmm0, %xmm0
mov $16, %rcx /* index for loads */
mov $12, %r9d /* byte position left over from less32bytes case */
/*
@@ -1389,7 +1348,7 @@
LABEL(nibble_ashr_12_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $12, -16(%rdi, %rdx), %xmm0
+ palignr $12, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1408,7 +1367,7 @@
jg LABEL(nibble_ashr_12_use)
movdqa (%rdi, %rdx), %xmm0
- palignr $12, -16(%rdi, %rdx), %xmm0
+ palignr $12, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1428,7 +1387,7 @@
LABEL(nibble_ashr_12_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $12, %xmm0
+ psrldq $12, D(%xmm0)
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -1446,14 +1405,10 @@
*/
.p2align 4
LABEL(ashr_13):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $3, %xmm2
+ pslldq $3, D(%xmm2)
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
+ pcmpeqb %xmm1, D(%xmm2)
+ psubb %xmm0, D(%xmm2)
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -1463,7 +1418,6 @@
UPDATE_STRNCMP_COUNTER
- pxor %xmm0, %xmm0
mov $16, %rcx /* index for loads */
mov $13, %r9d /* byte position left over from less32bytes case */
/*
@@ -1484,7 +1438,7 @@
LABEL(nibble_ashr_13_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $13, -16(%rdi, %rdx), %xmm0
+ palignr $13, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1503,7 +1457,7 @@
jg LABEL(nibble_ashr_13_use)
movdqa (%rdi, %rdx), %xmm0
- palignr $13, -16(%rdi, %rdx), %xmm0
+ palignr $13, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1523,7 +1477,7 @@
LABEL(nibble_ashr_13_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $13, %xmm0
+ psrldq $13, D(%xmm0)
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -1541,14 +1495,10 @@
*/
.p2align 4
LABEL(ashr_14):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $2, %xmm2
+ pslldq $2, D(%xmm2)
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
+ pcmpeqb %xmm1, D(%xmm2)
+ psubb %xmm0, D(%xmm2)
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -1558,7 +1508,6 @@
UPDATE_STRNCMP_COUNTER
- pxor %xmm0, %xmm0
mov $16, %rcx /* index for loads */
mov $14, %r9d /* byte position left over from less32bytes case */
/*
@@ -1579,7 +1528,7 @@
LABEL(nibble_ashr_14_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $14, -16(%rdi, %rdx), %xmm0
+ palignr $14, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1598,7 +1547,7 @@
jg LABEL(nibble_ashr_14_use)
movdqa (%rdi, %rdx), %xmm0
- palignr $14, -16(%rdi, %rdx), %xmm0
+ palignr $14, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1618,7 +1567,7 @@
LABEL(nibble_ashr_14_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $14, %xmm0
+ psrldq $14, D(%xmm0)
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -1636,14 +1585,10 @@
*/
.p2align 4
LABEL(ashr_15):
- pxor %xmm0, %xmm0
- movdqa (%rdi), %xmm2
- movdqa (%rsi), %xmm1
- pcmpeqb %xmm1, %xmm0
- pslldq $1, %xmm2
+ pslldq $1, D(%xmm2)
TOLOWER (%xmm1, %xmm2)
- pcmpeqb %xmm1, %xmm2
- psubb %xmm0, %xmm2
+ pcmpeqb %xmm1, D(%xmm2)
+ psubb %xmm0, D(%xmm2)
pmovmskb %xmm2, %r9d
shr %cl, %edx
shr %cl, %r9d
@@ -1654,7 +1599,6 @@
UPDATE_STRNCMP_COUNTER
- pxor %xmm0, %xmm0
mov $16, %rcx /* index for loads */
mov $15, %r9d /* byte position left over from less32bytes case */
/*
@@ -1676,7 +1620,7 @@
LABEL(nibble_ashr_15_restart_use):
movdqa (%rdi, %rdx), %xmm0
- palignr $15, -16(%rdi, %rdx), %xmm0
+ palignr $15, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1695,7 +1639,7 @@
jg LABEL(nibble_ashr_15_use)
movdqa (%rdi, %rdx), %xmm0
- palignr $15, -16(%rdi, %rdx), %xmm0
+ palignr $15, -16(%rdi, %rdx), D(%xmm0)
#if !defined USE_AS_STRCASECMP_L && !defined USE_AS_STRNCASECMP_L
pcmpistri $0x1a, (%rsi,%rdx), %xmm0
#else
@@ -1715,7 +1659,7 @@
LABEL(nibble_ashr_15_use):
sub $0x1000, %r10
movdqa -16(%rdi, %rdx), %xmm0
- psrldq $15, %xmm0
+ psrldq $15, D(%xmm0)
pcmpistri $0x3a,%xmm0, %xmm0
#if defined USE_AS_STRNCMP || defined USE_AS_STRNCASECMP_L
cmp %r11, %rcx
@@ -1834,3 +1778,14 @@
#undef LABEL
#undef GLABEL
#undef SECTION
+#undef movdqa
+#undef movdqu
+#undef pmovmskb
+#undef pcmpistri
+#undef psubb
+#undef pcmpeqb
+#undef psrldq
+#undef pslldq
+#undef palignr
+#undef pxor
+#undef D
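
The USE_AVX block near the top of this file is what lets one source produce
both encodings: each SSE mnemonic is redefined to its VEX form and the new
D() macro duplicates the destination operand, so for example

    pcmpeqb %xmm1, D(%xmm0)

assembles as the ordinary two-operand SSE instruction when USE_AVX is unset,
and expands to the three-operand VEX form

    vpcmpeqb %xmm1, %xmm0, %xmm0

when it is set. That, together with hoisting the pxor/movdqa setup ahead of
the ashr_* dispatch so %xmm0 is cleared once rather than in every case, is
the substance of the change; the trailing #undefs keep the mnemonic
redefinitions from leaking into subsequent inclusions of this file, just as
the existing #undef LABEL/GLABEL/SECTION block does.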
Modified: trunk/ports/ChangeLog.arm
==============================================================================
--- trunk/ports/ChangeLog.arm (original)
+++ trunk/ports/ChangeLog.arm Wed Oct 26 16:46:37 2011
@@ -1,3 +1,8 @@
+2011-10-26 Joseph Myers <joseph@xxxxxxxxxxxxxxxx>
+
+ * sysdeps/arm/dl-machine.h, sysdeps/unix/arm/sysdep.S: Restore
+ cases for use in rtld.
+
2011-10-05 Andreas Schwab <schwab@xxxxxxxxxx>
* sysdeps/arm/dl-machine.h (elf_machine_rel, elf_machine_rela)
Modified: trunk/ports/ChangeLog.m68k
==============================================================================
--- trunk/ports/ChangeLog.m68k (original)
+++ trunk/ports/ChangeLog.m68k Wed Oct 26 16:46:37 2011
@@ -1,3 +1,9 @@
+2011-10-26 Andreas Schwab <schwab@xxxxxxxxxxxxxx>
+
+ * sysdeps/m68k/m680x0/fpu/math_private.h: New file.
+
+ * sysdeps/m68k/m680x0/fpu/sincostab.c: New file.
+
2011-10-23 Andreas Schwab <schwab@xxxxxxxxxxxxxx>
* sysdeps/m68k/coldfire/fpu/e_sqrt.c: Add __sqrt_finite alias.
Modified: trunk/ports/sysdeps/arm/dl-machine.h
==============================================================================
--- trunk/ports/sysdeps/arm/dl-machine.h (original)
+++ trunk/ports/sysdeps/arm/dl-machine.h Wed Oct 26 16:46:37 2011
@@ -242,12 +242,18 @@
define the value.
ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one
of the main executable's symbols, as for a COPY reloc. */
-#define elf_machine_type_class(type) \
+#ifndef RTLD_BOOTSTRAP
+# define elf_machine_type_class(type) \
((((type) == R_ARM_JUMP_SLOT || (type) == R_ARM_TLS_DTPMOD32 \
|| (type) == R_ARM_TLS_DTPOFF32 || (type) == R_ARM_TLS_TPOFF32 \
|| (type) == R_ARM_TLS_DESC) \
* ELF_RTYPE_CLASS_PLT) \
| (((type) == R_ARM_COPY) * ELF_RTYPE_CLASS_COPY))
+#else
+#define elf_machine_type_class(type) \
+ ((((type) == R_ARM_JUMP_SLOT) * ELF_RTYPE_CLASS_PLT) \
+ | (((type) == R_ARM_COPY) * ELF_RTYPE_CLASS_COPY))
+#endif
/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries. */
#define ELF_MACHINE_JMP_SLOT R_ARM_JUMP_SLOT
Modified: trunk/ports/sysdeps/unix/arm/sysdep.S
==============================================================================
--- trunk/ports/sysdeps/unix/arm/sysdep.S (original)
+++ trunk/ports/sysdeps/unix/arm/sysdep.S Wed Oct 26 16:46:37 2011
@@ -42,6 +42,7 @@
moveq r0, $EAGAIN /* Yes; translate it to EAGAIN. */
#endif
+#ifndef IS_IN_rtld
mov ip, lr
cfi_register (lr, ip)
mov r1, r0
@@ -57,6 +58,16 @@
RETINSTR (, ip)
1: .word errno(gottpoff) + (. - 2b - 8)
+#elif RTLD_PRIVATE_ERRNO
+ ldr r1, 1f
+0: str r0, [pc, r1]
+ mvn r0, $0
+ DO_RET(r14)
+
+1: .word C_SYMBOL_NAME(rtld_errno) - 0b - 8
+#else
+#error "Unsupported non-TLS case"
+#endif
#undef __syscall_error
END (__syscall_error)
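
Both ARM changes restore paths that the dynamic linker itself needs:
dl-machine.h regains an RTLD_BOOTSTRAP variant of elf_machine_type_class
covering only the relocation classes rtld can meet while relocating itself
(R_ARM_JUMP_SLOT and R_ARM_COPY, without the TLS types), and __syscall_error
regains a branch for RTLD_PRIVATE_ERRNO builds, where the error code is
stored into the private rtld_errno variable through a PC-relative offset
instead of going through TLS.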