
[commits] r2258 - in /fsf/trunk/libc: ./ malloc/ nptl/ nptl/sysdeps/pthread/ nptl/sysdeps/unix/sysv/linux/ nptl/sysdeps/x86_64/ nscd/ ...



Author: eglibc
Date: Wed May 23 00:03:40 2007
New Revision: 2258

Log:
Import glibc-mainline for 2007-05-23

Added:
    fsf/trunk/libc/sysdeps/unix/sysv/linux/epoll_pwait.c
    fsf/trunk/libc/sysdeps/unix/sysv/linux/i386/epoll_pwait.S
    fsf/trunk/libc/sysdeps/x86_64/cacheinfo.c
Modified:
    fsf/trunk/libc/ChangeLog
    fsf/trunk/libc/malloc/Makefile
    fsf/trunk/libc/malloc/arena.c
    fsf/trunk/libc/malloc/hooks.c
    fsf/trunk/libc/malloc/malloc.c
    fsf/trunk/libc/nptl/ChangeLog
    fsf/trunk/libc/nptl/init.c
    fsf/trunk/libc/nptl/sysdeps/pthread/pthread-functions.h
    fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/libc_pthread_init.c
    fsf/trunk/libc/nptl/sysdeps/x86_64/tls.h
    fsf/trunk/libc/nptl/tst-robust9.c
    fsf/trunk/libc/nscd/aicache.c
    fsf/trunk/libc/stdio-common/tst-sprintf.c
    fsf/trunk/libc/stdio-common/vfprintf.c
    fsf/trunk/libc/sysdeps/unix/sysv/linux/Makefile
    fsf/trunk/libc/sysdeps/unix/sysv/linux/syscalls.list
    fsf/trunk/libc/sysdeps/unix/sysv/linux/x86_64/sys/epoll.h
    fsf/trunk/libc/sysdeps/unix/sysv/linux/x86_64/sysconf.c
    fsf/trunk/libc/sysdeps/x86_64/Makefile
    fsf/trunk/libc/sysdeps/x86_64/memcpy.S
    fsf/trunk/libc/sysdeps/x86_64/mempcpy.S

Modified: fsf/trunk/libc/ChangeLog
==============================================================================
--- fsf/trunk/libc/ChangeLog (original)
+++ fsf/trunk/libc/ChangeLog Wed May 23 00:03:40 2007
@@ -1,9 +1,60 @@
+2007-05-21  Ulrich Drepper  <drepper@xxxxxxxxxx>
+
+	* sysdeps/x86_64/cacheinfo.c (init_cacheinfo): Pass correct value
+	as second parameter to handle_intel.
+
+	* nscd/aicache.c (addhstaiX): If reported TTL is zero don't cache
+	the entry.
+
+	* sysdeps/unix/sysv/linux/x86_64/sysconf.c: Move cache information
+	handling to ...
+	* sysdeps/x86_64/cacheinfo.c: ... here.  New file.
+	* sysdeps/x86_64/Makefile [subdir=string] (sysdep_routines): Add
+	cacheinfo.
+	* sysdeps/x86_64/memcpy.S: Complete rewrite.
+	* sysdeps/x86_64/mempcpy.S: Adjust appropriately.
+	Patch by Evandro Menezes <evandro.menezes@xxxxxxx>.
+
+	* sysdeps/unix/sysv/linux/i386/epoll_pwait.S: New file.
+
+2007-05-21  Jakub Jelinek  <jakub@xxxxxxxxxx>
+
+	[BZ #4525]
+	* sysdeps/unix/sysv/linux/Makefile (sysdep_routines): Add epoll_pwait.
+	* sysdeps/unix/sysv/linux/epoll_pwait.c: New file.
+	* sysdeps/unix/sysv/linux/syscalls.list (epoll_pwait): Remove.
+
+	* sysdeps/unix/sysv/linux/x86_64/sys/epoll.h (epoll_pwait): Declare.
+
+	[BZ #4514]
+	* stdio-common/vfprintf.c (vfprintf): Don't shadow workstart variable,
+	reinitialize workend at the start of each do_positional format spec
+	loop, free workstart before do_positional loops.
+	(printf_unknown): Fix size of work_buffer.
+	* stdio-common/tst-sprintf.c (main): Add 3 new testcases.
+
+	* malloc/hooks.c (MALLOC_STATE_VERSION): Bump.
+	(public_sET_STATe): If ms->version < 3, put all chunks into
+	unsorted chunks and clear {fd,bk}_nextsize fields of largebin
+	chunks.
+
+	* malloc/malloc.c [MALLOC_DEBUG]: Revert 2007-05-13 changes.
+	* malloc/hooks.c: Likewise.
+	* malloc/arena.c: Likewise.
+	* malloc/malloc.c (do_check_malloc_state): Don't assert
+	n_mmaps is not greater than n_mmaps_max.  This removes the need
+	for the previous change.
+
+	* malloc/Makefile (CFLAGS-malloc.c): Revert accidental
+	2007-05-07 commit.
+
 2007-05-19  Ulrich Drepper  <drepper@xxxxxxxxxx>
 
 	* sysdeps/unix/sysv/linux/kernel-features.h (__ASSUME_PRIVATE_FUTEX):
 	Define for kernel >= 2.6.22.
 
 2007-05-18  Ulrich Drepper  <drepper@xxxxxxxxxx>
+
 	* elf/dl-close.c (_dl_close_worker): When removing object from
 	global scope, wait for all lookups to finish afterwards.
 	* elf/dl-open.c (add_to_global): When global scope array must

Modified: fsf/trunk/libc/malloc/Makefile
==============================================================================
--- fsf/trunk/libc/malloc/Makefile (original)
+++ fsf/trunk/libc/malloc/Makefile Wed May 23 00:03:40 2007
@@ -104,7 +104,6 @@
 include ../Rules
 
 CFLAGS-mcheck-init.c = $(PIC-ccflag)
-CFLAGS-malloc.c += -DMALLOC_DEBUG
 
 $(objpfx)libmcheck.a: $(objpfx)mcheck-init.o
 	-rm -f $@

Modified: fsf/trunk/libc/malloc/arena.c
==============================================================================
--- fsf/trunk/libc/malloc/arena.c (original)
+++ fsf/trunk/libc/malloc/arena.c Wed May 23 00:03:40 2007
@@ -370,9 +370,6 @@
   mp_.top_pad        = DEFAULT_TOP_PAD;
 #endif
   mp_.n_mmaps_max    = DEFAULT_MMAP_MAX;
-#if MALLOC_DEBUG
-  mp_.n_mmaps_cmax   = DEFAULT_MMAP_MAX;
-#endif
   mp_.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
   mp_.trim_threshold = DEFAULT_TRIM_THRESHOLD;
   mp_.pagesize       = malloc_getpagesize;

Modified: fsf/trunk/libc/malloc/hooks.c
==============================================================================
--- fsf/trunk/libc/malloc/hooks.c (original)
+++ fsf/trunk/libc/malloc/hooks.c Wed May 23 00:03:40 2007
@@ -496,7 +496,7 @@
    then the hooks are reset to 0.  */
 
 #define MALLOC_STATE_MAGIC   0x444c4541l
-#define MALLOC_STATE_VERSION (0*0x100l + 2l) /* major*0x100 + minor */
+#define MALLOC_STATE_VERSION (0*0x100l + 3l) /* major*0x100 + minor */
 
 struct malloc_save_state {
   long          magic;
@@ -507,9 +507,6 @@
   unsigned long trim_threshold;
   unsigned long top_pad;
   unsigned int  n_mmaps_max;
-#if MALLOC_DEBUG
-  unsigned int  n_mmaps_cmax;
-#endif
   unsigned long mmap_threshold;
   int           check_action;
   unsigned long max_sbrked_mem;
@@ -553,9 +550,6 @@
   ms->trim_threshold = mp_.trim_threshold;
   ms->top_pad = mp_.top_pad;
   ms->n_mmaps_max = mp_.n_mmaps_max;
-#if MALLOC_DEBUG
-  ms->n_mmaps_cmax = mp_.n_mmaps_cmax;
-#endif
   ms->mmap_threshold = mp_.mmap_threshold;
   ms->check_action = check_action;
   ms->max_sbrked_mem = main_arena.max_system_mem;
@@ -601,8 +595,9 @@
       assert(ms->av[2*i+3] == 0);
       first(b) = last(b) = b;
     } else {
-      if(i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
-			  largebin_index(chunksize(ms->av[2*i+3]))==i)) {
+      if(ms->version >= 3 &&
+	 (i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
+			   largebin_index(chunksize(ms->av[2*i+3]))==i))) {
 	first(b) = ms->av[2*i+2];
 	last(b) = ms->av[2*i+3];
 	/* Make sure the links to the bins within the heap are correct.  */
@@ -622,14 +617,22 @@
       }
     }
   }
+  if (ms->version < 3) {
+    /* Clear fd_nextsize and bk_nextsize fields.  */
+    b = unsorted_chunks(&main_arena)->fd;
+    while (b != unsorted_chunks(&main_arena)) {
+      if (!in_smallbin_range(chunksize(b))) {
+	b->fd_nextsize = NULL;
+	b->bk_nextsize = NULL;
+      }
+      b = b->fd;
+    }
+  }
   mp_.sbrk_base = ms->sbrk_base;
   main_arena.system_mem = ms->sbrked_mem_bytes;
   mp_.trim_threshold = ms->trim_threshold;
   mp_.top_pad = ms->top_pad;
   mp_.n_mmaps_max = ms->n_mmaps_max;
-#if MALLOC_DEBUG
-  mp_.n_mmaps_cmax = ms->n_mmaps_cmax;
-#endif
   mp_.mmap_threshold = ms->mmap_threshold;
   check_action = ms->check_action;
   main_arena.max_system_mem = ms->max_sbrked_mem;

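For context, the version check above matters to programs that snapshot the allocator with malloc_get_state and restore it under a newer libc with malloc_set_state: a version-2 image has no fd_nextsize/bk_nextsize links, so public_sET_STATe now puts those chunks back through the unsorted list and clears the new fields. A minimal sketch of that interface (illustrative only, assuming a glibc that still provides these hooks; not part of the patch):

#include <malloc.h>
#include <stdlib.h>

int
main (void)
{
  void *state = malloc_get_state ();   /* snapshot allocator parameters and bins */
  /* ... allocations happen here ... */
  malloc_set_state (state);            /* restore; an old (version < 3) image is
                                          upgraded as in hooks.c above */
  free (state);
  return 0;
}
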
Modified: fsf/trunk/libc/malloc/malloc.c
==============================================================================
--- fsf/trunk/libc/malloc/malloc.c (original)
+++ fsf/trunk/libc/malloc/malloc.c Wed May 23 00:03:40 2007
@@ -2358,9 +2358,6 @@
   /* Memory map support */
   int              n_mmaps;
   int              n_mmaps_max;
-#if MALLOC_DEBUG
-  int              n_mmaps_cmax;
-#endif
   int              max_n_mmaps;
   /* the mmap_threshold is dynamic, until the user sets
      it manually, at which point we need to disable any
@@ -2876,8 +2873,6 @@
   assert(total <= (unsigned long)(mp_.max_total_mem));
   assert(mp_.n_mmaps >= 0);
 #endif
-  assert(mp_.n_mmaps <= mp_.n_mmaps_cmax);
-  assert(mp_.n_mmaps_max <= mp_.n_mmaps_cmax);
   assert(mp_.n_mmaps <= mp_.max_n_mmaps);
 
   assert((unsigned long)(av->system_mem) <=
@@ -3475,13 +3470,6 @@
     }
 
   mp_.n_mmaps--;
-#if MALLOC_DEBUG
-  if (mp_.n_mmaps_cmax > mp_.n_mmaps_max)
-    {
-      assert (mp_.n_mmaps_cmax == mp_.n_mmaps + 1);
-      mp_.n_mmaps_cmax = mp_.n_mmaps;
-    }
-#endif
   mp_.mmapped_mem -= total_size;
 
   int ret __attribute__ ((unused)) = munmap((char *)block, total_size);
@@ -5397,9 +5385,6 @@
   mp_.n_mmaps_max = 0;
   mem = _int_malloc(av, size);
   mp_.n_mmaps_max = mmx;   /* reset mmap */
-#if MALLOC_DEBUG
-  mp_.n_mmaps_cmax = mmx;
-#endif
   if (mem == 0)
     return 0;
 
@@ -5725,17 +5710,8 @@
       res = 0;
     else
 #endif
-      {
-#if MALLOC_DEBUG
-	if (mp_.n_mmaps <= value)
-	  mp_.n_mmaps_cmax = value;
-	else
-	  mp_.n_mmaps_cmax = mp_.n_mmaps;
-#endif
-
-	mp_.n_mmaps_max = value;
-	mp_.no_dyn_threshold = 1;
-      }
+      mp_.n_mmaps_max = value;
+      mp_.no_dyn_threshold = 1;
     break;
 
   case M_CHECK_ACTION:

Modified: fsf/trunk/libc/nptl/ChangeLog
==============================================================================
--- fsf/trunk/libc/nptl/ChangeLog (original)
+++ fsf/trunk/libc/nptl/ChangeLog Wed May 23 00:03:40 2007
@@ -1,3 +1,20 @@
+2007-05-21  Ulrich Drepper  <drepper@xxxxxxxxxx>
+
+	* sysdeps/pthread/pthread-functions.h (struct pthread_functions):
+	Remove ptr_wait_lookup_done again.
+	* init.c (pthread_functions): Don't add .ptr_wait_lookup_done here.
+	(__pthread_initialize_minimal_internal): Initialize
+	_dl_wait_lookup_done pointer in _rtld_global directly.
+	* sysdeps/unix/sysv/linux/libc_pthread_init.c (__libc_pthread_init):
+	Remove code to set _dl_wait_lookup_done.
+	* sysdeps/x86_64/tls.h (THREAD_GSCOPE_WAIT): The pointer is not
+	encrypted for now.
+
+2007-05-21  Jakub Jelinek  <jakub@xxxxxxxxxx>
+
+	* tst-robust9.c (do_test): Don't fail if ENABLE_PI and
+	pthread_mutex_init failed with ENOTSUP.
+
 2007-05-19  Ulrich Drepper  <drepper@xxxxxxxxxx>
 
 	* allocatestack.c (__wait_lookup_done): New function.

Modified: fsf/trunk/libc/nptl/init.c
==============================================================================
--- fsf/trunk/libc/nptl/init.c (original)
+++ fsf/trunk/libc/nptl/init.c Wed May 23 00:03:40 2007
@@ -117,8 +117,7 @@
     .ptr__nptl_deallocate_tsd = __nptl_deallocate_tsd,
     .ptr__nptl_setxid = __nptl_setxid,
     /* For now only the stack cache needs to be freed.  */
-    .ptr_freeres = __free_stack_cache,
-    .ptr_wait_lookup_done = __wait_lookup_done
+    .ptr_freeres = __free_stack_cache
   };
 # define ptr_pthread_functions &pthread_functions
 #else
@@ -366,6 +365,8 @@
 
   GL(dl_init_static_tls) = &__pthread_init_static_tls;
 
+  GL(dl_wait_lookup_done) = &__wait_lookup_done;
+
   /* Register the fork generation counter with the libc.  */
 #ifndef TLS_MULTIPLE_THREADS_IN_TCB
   __libc_multiple_threads_ptr =

Modified: fsf/trunk/libc/nptl/sysdeps/pthread/pthread-functions.h
==============================================================================
--- fsf/trunk/libc/nptl/sysdeps/pthread/pthread-functions.h (original)
+++ fsf/trunk/libc/nptl/sysdeps/pthread/pthread-functions.h Wed May 23 00:03:40 2007
@@ -97,7 +97,6 @@
   void (*ptr__nptl_deallocate_tsd) (void);
   int (*ptr__nptl_setxid) (struct xid_command *);
   void (*ptr_freeres) (void);
-  void (*ptr_wait_lookup_done) (void);
 };
 
 /* Variable in libc.so.  */

Modified: fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/libc_pthread_init.c
==============================================================================
--- fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/libc_pthread_init.c (original)
+++ fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/libc_pthread_init.c Wed May 23 00:03:40 2007
@@ -71,12 +71,6 @@
       dest->parr[cnt] = p;
     }
   __libc_pthread_functions_init = 1;
-
-# ifdef RTLD_NOT_MANGLED
-  GL(dl_wait_lookup_done) = functions->ptr_wait_lookup_done;
-# else
-  GL(dl_wait_lookup_done) = __libc_pthread_functions.ptr_wait_lookup_done;
-# endif
 #endif
 
 #ifndef TLS_MULTIPLE_THREADS_IN_TCB

Modified: fsf/trunk/libc/nptl/sysdeps/x86_64/tls.h
==============================================================================
--- fsf/trunk/libc/nptl/sysdeps/x86_64/tls.h (original)
+++ fsf/trunk/libc/nptl/sysdeps/x86_64/tls.h Wed May 23 00:03:40 2007
@@ -357,10 +357,7 @@
 #define THREAD_GSCOPE_SET_FLAG() \
   THREAD_SETMEM (THREAD_SELF, header.gscope_flag, THREAD_GSCOPE_FLAG_USED)
 #define THREAD_GSCOPE_WAIT() \
-  do { void (*ptr) (void) = GL(dl_wait_lookup_done);			      \
-       PTR_DEMANGLE (ptr);						      \
-       ptr ();								      \
-  } while (0)
+  GL(dl_wait_lookup_done) ()
 
 
 #endif /* __ASSEMBLER__ */

Modified: fsf/trunk/libc/nptl/tst-robust9.c
==============================================================================
--- fsf/trunk/libc/nptl/tst-robust9.c (original)
+++ fsf/trunk/libc/nptl/tst-robust9.c Wed May 23 00:03:40 2007
@@ -54,6 +54,13 @@
     }
 #endif
   err = pthread_mutex_init (&m, &ma);
+#ifdef ENABLE_PI
+  if (err == ENOTSUP)
+    {
+      puts ("PI robust mutexes not supported");
+      return 0;
+    }
+#endif
   if (err)
     {
       puts ("pthread_mutex_init");

Modified: fsf/trunk/libc/nscd/aicache.c
==============================================================================
--- fsf/trunk/libc/nscd/aicache.c (original)
+++ fsf/trunk/libc/nscd/aicache.c Wed May 23 00:03:40 2007
@@ -1,5 +1,5 @@
 /* Cache handling for host lookup.
-   Copyright (C) 2004, 2005, 2006 Free Software Foundation, Inc.
+   Copyright (C) 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@xxxxxxxxxx>, 2004.
 
@@ -77,15 +77,6 @@
       else
 	dbg_log (_("Reloading \"%s\" in hosts cache!"), (char *) key);
     }
-
-#if 0
-  uid_t oldeuid = 0;
-  if (db->secure)
-    {
-      oldeuid = geteuid ();
-      pthread_seteuid_np (uid);
-    }
-#endif
 
   static service_user *hosts_database;
   service_user *nip = NULL;
@@ -263,8 +254,10 @@
 
 	      total = sizeof (*dataset) + naddrs + addrslen + canonlen;
 
-	      /* Now we can allocate the data structure.  */
-	      if (he == NULL)
+	      /* Now we can allocate the data structure.  If the TTL
+		 of the entry is reported as zero do not cache the
+		 entry at all.  */
+	      if (ttl != 0 && he == NULL)
 		{
 		  dataset = (struct dataset *) mempool_alloc (db,
 							      total
@@ -453,11 +446,6 @@
  out:
   _res.options = old_res_options;
 
-#if 0
-  if (db->secure)
-    pthread_seteuid_np (oldeuid);
-#endif
-
   if (dataset != NULL && !alloca_used)
     {
       /* If necessary, we also propagate the data to disk.  */

Modified: fsf/trunk/libc/stdio-common/tst-sprintf.c
==============================================================================
--- fsf/trunk/libc/stdio-common/tst-sprintf.c (original)
+++ fsf/trunk/libc/stdio-common/tst-sprintf.c Wed May 23 00:03:40 2007
@@ -37,5 +37,26 @@
       free (dst);
     }
 
+  if (sprintf (buf, "%1$d%3$.*2$s%4$d", 7, 67108863, "x", 8) != 3
+      || strcmp (buf, "7x8") != 0)
+    {
+      printf ("sprintf (buf, \"%%1$d%%3$.*2$s%%4$d\", 7, 67108863, \"x\", 8) produced `%s' output", buf);
+      result = 1;
+    }
+
+  if (sprintf (buf, "%67108863.16\"%d", 7) != 14
+      || strcmp (buf, "%67108863.16\"7") != 0)
+    {
+      printf ("sprintf (buf, \"%%67108863.16\\\"%%d\", 7) produced `%s' output", buf);
+      result = 1;
+    }
+
+  if (sprintf (buf, "%*\"%d", 0x3ffffff, 7) != 11
+      || strcmp (buf, "%67108863\"7") != 0)
+    {
+      printf ("sprintf (buf, \"%%*\\\"%%d\", 0x3ffffff, 7) produced `%s' output", buf);
+      result = 1;
+    }
+
   return result;
 }

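The new tests above exercise positional parameters whose precision is itself taken from a (very large) positional argument, the pattern behind BZ #4514. For reference, a simpler, well-formed use of the same syntax (illustrative, not part of the patch):

#include <stdio.h>

int
main (void)
{
  char buf[32];
  /* "%2$.*1$s": print positional argument 2 as a string, with the precision
     taken from positional argument 1 (here 3), so only "abc" is copied.  */
  sprintf (buf, "%2$.*1$s", 3, "abcdef");
  puts (buf);                          /* -> abc */
  return 0;
}
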
Modified: fsf/trunk/libc/stdio-common/vfprintf.c
==============================================================================
--- fsf/trunk/libc/stdio-common/vfprintf.c (original)
+++ fsf/trunk/libc/stdio-common/vfprintf.c Wed May 23 00:03:40 2007
@@ -1627,6 +1627,8 @@
     /* Just a counter.  */
     size_t cnt;
 
+    free (workstart);
+    workstart = NULL;
 
     if (grouping == (const char *) -1)
       {
@@ -1801,7 +1803,9 @@
 	int use_outdigits = specs[nspecs_done].info.i18n;
 	char pad = specs[nspecs_done].info.pad;
 	CHAR_T spec = specs[nspecs_done].info.spec;
-	CHAR_T *workstart = NULL;
+
+	workstart = NULL;
+	workend = &work_buffer[sizeof (work_buffer) / sizeof (CHAR_T)];
 
 	/* Fill in last information.  */
 	if (specs[nspecs_done].width_arg != -1)
@@ -1897,8 +1901,7 @@
 	    break;
 	  }
 
-	if (__builtin_expect (workstart != NULL, 0))
-	  free (workstart);
+	free (workstart);
 	workstart = NULL;
 
 	/* Write the following constant string.  */
@@ -1926,7 +1929,7 @@
 
 {
   int done = 0;
-  CHAR_T work_buffer[MAX (info->width, info->spec) + 32];
+  CHAR_T work_buffer[MAX (sizeof (info->width), sizeof (info->prec)) * 3];
   CHAR_T *const workend
     = &work_buffer[sizeof (work_buffer) / sizeof (CHAR_T)];
   register CHAR_T *w;

Modified: fsf/trunk/libc/sysdeps/unix/sysv/linux/Makefile
==============================================================================
Binary files - no diff available.

Added: fsf/trunk/libc/sysdeps/unix/sysv/linux/epoll_pwait.c
==============================================================================
--- fsf/trunk/libc/sysdeps/unix/sysv/linux/epoll_pwait.c (added)
+++ fsf/trunk/libc/sysdeps/unix/sysv/linux/epoll_pwait.c Wed May 23 00:03:40 2007
@@ -1,0 +1,69 @@
+/* Copyright (C) 2007 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <errno.h>
+#include <signal.h>
+#include <unistd.h>
+#include <sys/epoll.h>
+
+#include <sysdep-cancel.h>
+#include <sys/syscall.h>
+
+#ifdef __NR_epoll_pwait
+
+/* Wait for events on an epoll instance "epfd". Returns the number of
+   triggered events returned in "events" buffer. Or -1 in case of
+   error with the "errno" variable set to the specific error code. The
+   "events" parameter is a buffer that will contain triggered
+   events. The "maxevents" is the maximum number of events to be
+   returned ( usually size of "events" ). The "timeout" parameter
+   specifies the maximum wait time in milliseconds (-1 == infinite).
+   The thread's signal mask is temporarily and atomically replaced with
+   the one provided as parameter.  */
+
+int epoll_pwait (int epfd, struct epoll_event *events,
+		 int maxevents, int timeout,
+		 const sigset_t *set)
+{
+  if (SINGLE_THREAD_P)
+    return INLINE_SYSCALL (epoll_pwait, 6, epfd, events, maxevents, timeout,
+			   set, _NSIG / 8);
+
+  int oldtype = LIBC_CANCEL_ASYNC ();
+
+  int result = INLINE_SYSCALL (epoll_pwait, 6, epfd, events, maxevents,
+			       timeout, set, _NSIG / 8);
+
+  LIBC_CANCEL_RESET (oldtype);
+
+  return result;
+}
+
+#else
+
+int epoll_pwait (int epfd, struct epoll_event *events,
+		 int maxevents, int timeout,
+		 const sigset_t *set)
+{
+  __set_errno (ENOSYS);
+  return -1;
+}
+stub_warning (epoll_pwait)
+
+# include <stub-tag.h>
+#endif

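For reference, typical use of the interface this file implements: the thread's signal mask is replaced only for the duration of the wait, so a signal kept blocked elsewhere can still interrupt the sleep. Illustrative sketch, not part of the patch:

#include <signal.h>
#include <stdio.h>
#include <sys/epoll.h>

int
main (void)
{
  int epfd = epoll_create (1);
  if (epfd < 0)
    {
      perror ("epoll_create");
      return 1;
    }

  /* Keep SIGINT blocked in general ...  */
  sigset_t block, oldmask;
  sigemptyset (&block);
  sigaddset (&block, SIGINT);
  sigprocmask (SIG_BLOCK, &block, &oldmask);

  struct epoll_event ev[4];
  /* ... but let it be delivered while sleeping here, by atomically switching
     to the old mask for the duration of the wait (1000 ms timeout).  */
  int n = epoll_pwait (epfd, ev, 4, 1000, &oldmask);
  if (n < 0)
    perror ("epoll_pwait");
  else
    printf ("%d event(s)\n", n);
  return 0;
}
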
Added: fsf/trunk/libc/sysdeps/unix/sysv/linux/i386/epoll_pwait.S
==============================================================================
--- fsf/trunk/libc/sysdeps/unix/sysv/linux/i386/epoll_pwait.S (added)
+++ fsf/trunk/libc/sysdeps/unix/sysv/linux/i386/epoll_pwait.S Wed May 23 00:03:40 2007
@@ -1,0 +1,80 @@
+/* Copyright (C) 2007 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+#define _ERRNO_H
+#include <bits/errno.h>
+#define _SIGNAL_H
+#include <bits/signum.h>
+
+
+	.text
+ENTRY (epoll_pwait)
+
+#ifdef __NR_epoll_pwait
+
+	/* Save registers.  */
+	pushl %ebp
+	cfi_adjust_cfa_offset (4)
+	pushl %ebx
+	cfi_adjust_cfa_offset (4)
+	pushl %esi
+	cfi_adjust_cfa_offset (4)
+	pushl %edi
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (edi, 0)
+	cfi_rel_offset (esi, 4)
+	cfi_rel_offset (ebx, 8)
+	cfi_rel_offset (ebp, 12)
+
+	movl 20(%esp), %ebx
+	movl 24(%esp), %ecx
+	movl 28(%esp), %edx
+	movl 32(%esp), %esi
+	movl 36(%esp), %edi
+	movl $_NSIG/8, %ebp
+	movl $__NR_epoll_pwait, %eax
+
+	ENTER_KERNEL
+
+	/* Restore registers.  */
+	popl %edi
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (edi)
+	popl %esi
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (esi)
+	popl %ebx
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (ebx)
+	popl %ebp
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (ebp)
+
+	/* If 0 > %eax > -4096 there was an error.  */
+	cmpl $-4096, %eax
+	ja SYSCALL_ERROR_LABEL
+
+	/* Successful; return the syscall's value.  */
+#else
+	movl $-ENOSYS, %eax
+	jmp SYSCALL_ERROR_LABEL
+#endif
+L(pseudo_end):
+	ret
+PSEUDO_END (epoll_pwait)

Modified: fsf/trunk/libc/sysdeps/unix/sysv/linux/syscalls.list
==============================================================================
--- fsf/trunk/libc/sysdeps/unix/sysv/linux/syscalls.list (original)
+++ fsf/trunk/libc/sysdeps/unix/sysv/linux/syscalls.list Wed May 23 00:03:40 2007
@@ -10,7 +10,6 @@
 epoll_create	EXTRA	epoll_create	i:i	epoll_create
 epoll_ctl	EXTRA	epoll_ctl	i:iiip	epoll_ctl
 epoll_wait	EXTRA	epoll_wait	Ci:ipii	epoll_wait
-epoll_pwait	EXTRA	epoll_pwait	Ci:ipiipi	epoll_pwait
 fdatasync	-	fdatasync	Ci:i	fdatasync
 flock		-	flock		i:ii	__flock		flock
 fork		-	fork		i:	__libc_fork	__fork fork

Modified: fsf/trunk/libc/sysdeps/unix/sysv/linux/x86_64/sys/epoll.h
==============================================================================
--- fsf/trunk/libc/sysdeps/unix/sysv/linux/x86_64/sys/epoll.h (original)
+++ fsf/trunk/libc/sysdeps/unix/sysv/linux/x86_64/sys/epoll.h Wed May 23 00:03:40 2007
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002,2003,2004,2005,2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002,2003,2004,2005,2006,2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -21,6 +21,14 @@
 
 #include <stdint.h>
 #include <sys/types.h>
+
+/* Get __sigset_t.  */
+#include <bits/sigset.h>
+
+#ifndef __sigset_t_defined
+# define __sigset_t_defined
+typedef __sigset_t sigset_t;
+#endif
 
 
 enum EPOLL_EVENTS
@@ -105,6 +113,16 @@
 extern int epoll_wait (int __epfd, struct epoll_event *__events,
 		       int __maxevents, int __timeout);
 
+
+/* Same as epoll_wait, but the thread's signal mask is temporarily
+   and atomically replaced with the one provided as parameter.
+
+   This function is a cancellation point and therefore not marked with
+   __THROW.  */
+extern int epoll_pwait (int __epfd, struct epoll_event *__events,
+			int __maxevents, int __timeout,
+			__const __sigset_t *__ss);
+
 __END_DECLS
 
 #endif /* sys/epoll.h */

Modified: fsf/trunk/libc/sysdeps/unix/sysv/linux/x86_64/sysconf.c
==============================================================================
--- fsf/trunk/libc/sysdeps/unix/sysv/linux/x86_64/sysconf.c (original)
+++ fsf/trunk/libc/sysdeps/unix/sysv/linux/x86_64/sysconf.c Wed May 23 00:03:40 2007
@@ -1,5 +1,5 @@
 /* Get file-specific information about a file.  Linux version.
-   Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -24,328 +24,17 @@
 
 
 static long int linux_sysconf (int name);
-
-
-static const struct intel_02_cache_info
-{
-  unsigned int idx;
-  int name;
-  long int size;
-  long int assoc;
-  long int linesize;
-} intel_02_known[] =
-  {
-    { 0x06, _SC_LEVEL1_ICACHE_SIZE, 8192, 4, 32 },
-    { 0x08, _SC_LEVEL1_ICACHE_SIZE, 16384, 4, 32 },
-    { 0x0a, _SC_LEVEL1_DCACHE_SIZE, 8192, 2, 32 },
-    { 0x0c, _SC_LEVEL1_DCACHE_SIZE, 16384, 4, 32 },
-    { 0x22, _SC_LEVEL3_CACHE_SIZE, 524288, 4, 64 },
-    { 0x23, _SC_LEVEL3_CACHE_SIZE, 1048576, 8, 64 },
-    { 0x25, _SC_LEVEL3_CACHE_SIZE, 2097152, 8, 64 },
-    { 0x29, _SC_LEVEL3_CACHE_SIZE, 4194304, 8, 64 },
-    { 0x2c, _SC_LEVEL1_DCACHE_SIZE, 32768, 8, 64 },
-    { 0x30, _SC_LEVEL1_ICACHE_SIZE, 32768, 8, 64 },
-    { 0x39, _SC_LEVEL2_CACHE_SIZE, 131072, 4, 64 },
-    { 0x3a, _SC_LEVEL2_CACHE_SIZE, 196608, 6, 64 },
-    { 0x3b, _SC_LEVEL2_CACHE_SIZE, 131072, 2, 64 },
-    { 0x3c, _SC_LEVEL2_CACHE_SIZE, 262144, 4, 64 },
-    { 0x3d, _SC_LEVEL2_CACHE_SIZE, 393216, 6, 64 },
-    { 0x3e, _SC_LEVEL2_CACHE_SIZE, 524288, 4, 64 },
-    { 0x41, _SC_LEVEL2_CACHE_SIZE, 131072, 4, 32 },
-    { 0x42, _SC_LEVEL2_CACHE_SIZE, 262144, 4, 32 },
-    { 0x43, _SC_LEVEL2_CACHE_SIZE, 524288, 4, 32 },
-    { 0x44, _SC_LEVEL2_CACHE_SIZE, 1048576, 4, 32 },
-    { 0x45, _SC_LEVEL2_CACHE_SIZE, 2097152, 4, 32 },
-    { 0x46, _SC_LEVEL3_CACHE_SIZE, 4194304, 4, 64 },
-    { 0x47, _SC_LEVEL3_CACHE_SIZE, 8388608, 8, 64 },
-    { 0x49, _SC_LEVEL2_CACHE_SIZE, 4194304, 16, 64 },
-    { 0x4a, _SC_LEVEL3_CACHE_SIZE, 6291456, 12, 64 },
-    { 0x4b, _SC_LEVEL3_CACHE_SIZE, 8388608, 16, 64 },
-    { 0x4c, _SC_LEVEL3_CACHE_SIZE, 12582912, 12, 64 },
-    { 0x4d, _SC_LEVEL3_CACHE_SIZE, 16777216, 16, 64 },
-    { 0x60, _SC_LEVEL1_DCACHE_SIZE, 16384, 8, 64 },
-    { 0x66, _SC_LEVEL1_DCACHE_SIZE, 8192, 4, 64 },
-    { 0x67, _SC_LEVEL1_DCACHE_SIZE, 16384, 4, 64 },
-    { 0x68, _SC_LEVEL1_DCACHE_SIZE, 32768, 4, 64 },
-    { 0x78, _SC_LEVEL2_CACHE_SIZE, 1048576, 8, 64 },
-    { 0x79, _SC_LEVEL2_CACHE_SIZE, 131072, 8, 64 },
-    { 0x7a, _SC_LEVEL2_CACHE_SIZE, 262144, 8, 64 },
-    { 0x7b, _SC_LEVEL2_CACHE_SIZE, 524288, 8, 64 },
-    { 0x7c, _SC_LEVEL2_CACHE_SIZE, 1048576, 8, 64 },
-    { 0x7d, _SC_LEVEL2_CACHE_SIZE, 2097152, 8, 64 },
-    { 0x7f, _SC_LEVEL2_CACHE_SIZE, 524288, 2, 64 },
-    { 0x82, _SC_LEVEL2_CACHE_SIZE, 262144, 8, 32 },
-    { 0x83, _SC_LEVEL2_CACHE_SIZE, 524288, 8, 32 },
-    { 0x84, _SC_LEVEL2_CACHE_SIZE, 1048576, 8, 32 },
-    { 0x85, _SC_LEVEL2_CACHE_SIZE, 2097152, 8, 32 },
-    { 0x86, _SC_LEVEL2_CACHE_SIZE, 524288, 4, 64 },
-    { 0x87, _SC_LEVEL2_CACHE_SIZE, 1048576, 8, 64 },
-  };
-#define nintel_02_known (sizeof (intel_02_known) / sizeof (intel_02_known[0]))
-
-
-static int
-intel_02_known_compare (const void *p1, const void *p2)
-{
-  const struct intel_02_cache_info *i1;
-  const struct intel_02_cache_info *i2;
-
-  i1 = (const struct intel_02_cache_info *) p1;
-  i2 = (const struct intel_02_cache_info *) p2;
-
-  if (i1->idx == i2->idx)
-    return 0;
-
-  return i1->idx < i2->idx ? -1 : 1;
-}
-
-
-static long int
-__attribute__ ((noinline))
-intel_check_word (int name, unsigned int value, bool *has_level_2,
-		  bool *no_level_2_or_3)
-{
-  if ((value & 0x80000000) != 0)
-    /* The register value is reserved.  */
-    return 0;
-
-  /* Fold the name.  The _SC_ constants are always in the order SIZE,
-     ASSOC, LINESIZE.  */
-  int folded_name = (_SC_LEVEL1_ICACHE_SIZE
-		     + ((name - _SC_LEVEL1_ICACHE_SIZE) / 3) * 3);
-
-  while (value != 0)
-    {
-      unsigned int byte = value & 0xff;
-
-      if (byte == 0x40)
-	{
-	  *no_level_2_or_3 = true;
-
-	  if (folded_name == _SC_LEVEL3_CACHE_SIZE)
-	    /* No need to look further.  */
-	    break;
-	}
-      else
-	{
-	  if (byte == 0x49 && folded_name == _SC_LEVEL3_CACHE_SIZE)
-	    {
-	      /* Intel reused this value.  For family 15, model 6 it
-		 specifies the 3rd level cache.  Otherwise the 2nd
-		 level cache.  */
-	      unsigned int eax;
-	      unsigned int ebx;
-	      unsigned int ecx;
-	      unsigned int edx;
-	      asm volatile ("xchgl %%ebx, %1; cpuid; xchgl %%ebx, %1"
-			    : "=a" (eax), "=r" (ebx), "=c" (ecx), "=d" (edx)
-			    : "0" (1));
-
-	      unsigned int family = ((eax >> 20) & 0xff) + ((eax >> 8) & 0xf);
-	      unsigned int model = ((((eax >>16) & 0xf) << 4)
-				    + ((eax >> 4) & 0xf));
-	      if (family == 15 && model == 6)
-		{
-		  /* The level 3 cache is encoded for this model like
-		     the level 2 cache is for other models.  Pretend
-		     the caller asked for the level 2 cache.  */
-		  name = (_SC_LEVEL2_CACHE_SIZE
-			  + (name - _SC_LEVEL3_CACHE_SIZE));
-		  folded_name = _SC_LEVEL3_CACHE_SIZE;
-		}
-	    }
-
-	  struct intel_02_cache_info *found;
-	  struct intel_02_cache_info search;
-
-	  search.idx = byte;
-	  found = bsearch (&search, intel_02_known, nintel_02_known,
-			   sizeof (intel_02_known[0]), intel_02_known_compare);
-	  if (found != NULL)
-	    {
-	      if (found->name == folded_name)
-		{
-		  unsigned int offset = name - folded_name;
-
-		  if (offset == 0)
-		    /* Cache size.  */
-		    return found->size;
-		  if (offset == 1)
-		    return found->assoc;
-
-		  assert (offset == 2);
-		  return found->linesize;
-		}
-
-	      if (found->name == _SC_LEVEL2_CACHE_SIZE)
-		*has_level_2 = true;
-	    }
-	}
-
-      /* Next byte for the next round.  */
-      value >>= 8;
-    }
-
-  /* Nothing found.  */
-  return 0;
-}
-
-
-static long int __attribute__ ((noinline))
-handle_intel (int name, unsigned int maxidx)
-{
-  assert (maxidx >= 2);
-
-  /* OK, we can use the CPUID instruction to get all info about the
-     caches.  */
-  unsigned int cnt = 0;
-  unsigned int max = 1;
-  long int result = 0;
-  bool no_level_2_or_3 = false;
-  bool has_level_2 = false;
-  while (cnt++ < max)
-    {
-      unsigned int eax;
-      unsigned int ebx;
-      unsigned int ecx;
-      unsigned int edx;
-      asm volatile ("xchgl %%ebx, %1; cpuid; xchgl %%ebx, %1"
-		    : "=a" (eax), "=r" (ebx), "=c" (ecx), "=d" (edx)
-		    : "0" (2));
-
-      /* The low byte of EAX in the first round contain the number of
-	 rounds we have to make.  At least one, the one we are already
-	 doing.  */
-      if (cnt == 1)
-	{
-	  max = eax & 0xff;
-	  eax &= 0xffffff00;
-	}
-
-      /* Process the individual registers' value.  */
-      result = intel_check_word (name, eax, &has_level_2, &no_level_2_or_3);
-      if (result != 0)
-	return result;
-
-      result = intel_check_word (name, ebx, &has_level_2, &no_level_2_or_3);
-      if (result != 0)
-	return result;
-
-      result = intel_check_word (name, ecx, &has_level_2, &no_level_2_or_3);
-      if (result != 0)
-	return result;
-
-      result = intel_check_word (name, edx, &has_level_2, &no_level_2_or_3);
-      if (result != 0)
-	return result;
-    }
-
-  if (name >= _SC_LEVEL2_CACHE_SIZE && name <= _SC_LEVEL3_CACHE_LINESIZE
-      && no_level_2_or_3)
-    return -1;
-
-  return 0;
-}
-
-
-static long int __attribute__ ((noinline))
-handle_amd (int name)
-{
-  unsigned int eax;
-  unsigned int ebx;
-  unsigned int ecx;
-  unsigned int edx;
-  asm volatile ("xchgl %%ebx, %1; cpuid; xchgl %%ebx, %1"
-		: "=a" (eax), "=r" (ebx), "=c" (ecx), "=d" (edx)
-		: "0" (0x80000000));
-
-  if (name >= _SC_LEVEL3_CACHE_SIZE)
-    return 0;
-
-  unsigned int fn = 0x80000005 + (name >= _SC_LEVEL2_CACHE_SIZE);
-  if (eax < fn)
-    return 0;
-
-  asm volatile ("xchgl %%ebx, %1; cpuid; xchgl %%ebx, %1"
-		: "=a" (eax), "=r" (ebx), "=c" (ecx), "=d" (edx)
-		: "0" (fn));
-
-  if (name < _SC_LEVEL1_DCACHE_SIZE)
-    {
-      name += _SC_LEVEL1_DCACHE_SIZE - _SC_LEVEL1_ICACHE_SIZE;
-      ecx = edx;
-    }
-
-  switch (name)
-    {
-    case _SC_LEVEL1_DCACHE_SIZE:
-      return (ecx >> 14) & 0x3fc00;
-    case _SC_LEVEL1_DCACHE_ASSOC:
-      ecx >>= 16;
-      if ((ecx & 0xff) == 0xff)
-	/* Fully associative.  */
-	return (ecx << 2) & 0x3fc00;
-      return ecx & 0xff;
-    case _SC_LEVEL1_DCACHE_LINESIZE:
-      return ecx & 0xff;
-    case _SC_LEVEL2_CACHE_SIZE:
-      return (ecx & 0xf000) == 0 ? 0 : (ecx >> 6) & 0x3fffc00;
-    case _SC_LEVEL2_CACHE_ASSOC:
-      ecx >>= 12;
-      switch (ecx & 0xf)
-        {
-        case 0:
-        case 1:
-        case 2:
-        case 4:
-	  return ecx & 0xf;
-	case 6:
-	  return 8;
-	case 8:
-	  return 16;
-	case 0xf:
-	  return (ecx << 6) & 0x3fffc00;
-	default:
-	  return 0;
-        }
-    case _SC_LEVEL2_CACHE_LINESIZE:
-      return (ecx & 0xf000) == 0 ? 0 : ecx & 0xff;
-    default:
-      assert (! "cannot happen");
-    }
-  return -1;
-}
+extern long int __cache_sysconf (int) attribute_hidden;
 
 
 /* Get the value of the system variable NAME.  */
 long int
 __sysconf (int name)
 {
-  /* We only handle the cache information here (for now).  */
-  if (name < _SC_LEVEL1_ICACHE_SIZE || name > _SC_LEVEL4_CACHE_LINESIZE)
-    return linux_sysconf (name);
+  if (name >= _SC_LEVEL1_ICACHE_SIZE && name <= _SC_LEVEL4_CACHE_LINESIZE)
+    return __cache_sysconf (name);
 
-  /* Find out what brand of processor.  */
-  unsigned int eax;
-  unsigned int ebx;
-  unsigned int ecx;
-  unsigned int edx;
-  asm volatile ("xchgl %%ebx, %1; cpuid; xchgl %%ebx, %1"
-		: "=a" (eax), "=r" (ebx), "=c" (ecx), "=d" (edx)
-		: "0" (0));
-
-  /* This spells out "GenuineIntel".  */
-  if (ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69)
-    return handle_intel (name, eax);
-
-  /* This spells out "AuthenticAMD".  */
-  if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
-    return handle_amd (name);
-
-  // XXX Fill in more vendors.
-
-  /* CPU not known, we have no information.  */
-  return 0;
+  return linux_sysconf (name);
 }
 
 /* Now the generic Linux version.  */

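Callers see none of this restructuring; the cache parameters are still reached through sysconf, which now forwards to __cache_sysconf for the cache-related names. A small query sketch (illustrative, not part of the patch; per the code above, 0 means no information and -1 can mean the cache level is not present):

#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  long l1d  = sysconf (_SC_LEVEL1_DCACHE_SIZE);
  long l2   = sysconf (_SC_LEVEL2_CACHE_SIZE);
  long line = sysconf (_SC_LEVEL1_DCACHE_LINESIZE);

  printf ("L1d %ld bytes, L2 %ld bytes, L1d line %ld bytes\n", l1d, l2, line);
  return 0;
}
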
Modified: fsf/trunk/libc/sysdeps/x86_64/Makefile
==============================================================================
--- fsf/trunk/libc/sysdeps/x86_64/Makefile (original)
+++ fsf/trunk/libc/sysdeps/x86_64/Makefile Wed May 23 00:03:40 2007
@@ -9,3 +9,7 @@
 ifeq ($(subdir),gmon)
 sysdep_routines += _mcount
 endif
+
+ifeq ($(subdir),string)
+sysdep_routines += cacheinfo
+endif

Added: fsf/trunk/libc/sysdeps/x86_64/cacheinfo.c
==============================================================================
--- fsf/trunk/libc/sysdeps/x86_64/cacheinfo.c (added)
+++ fsf/trunk/libc/sysdeps/x86_64/cacheinfo.c Wed May 23 00:03:40 2007
@@ -1,0 +1,451 @@
+/* x86_64 cache info.
+   Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.
+*/
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+static const struct intel_02_cache_info
+{
+  unsigned int idx;
+  int name;
+  long int size;
+  long int assoc;
+  long int linesize;
+} intel_02_known [] =
+  {
+    { 0x06, _SC_LEVEL1_ICACHE_SIZE,    8192,  4, 32 },
+    { 0x08, _SC_LEVEL1_ICACHE_SIZE,   16384,  4, 32 },
+    { 0x0a, _SC_LEVEL1_DCACHE_SIZE,    8192,  2, 32 },
+    { 0x0c, _SC_LEVEL1_DCACHE_SIZE,   16384,  4, 32 },
+    { 0x22, _SC_LEVEL3_CACHE_SIZE,   524288,  4, 64 },
+    { 0x23, _SC_LEVEL3_CACHE_SIZE,  1048576,  8, 64 },
+    { 0x25, _SC_LEVEL3_CACHE_SIZE,  2097152,  8, 64 },
+    { 0x29, _SC_LEVEL3_CACHE_SIZE,  4194304,  8, 64 },
+    { 0x2c, _SC_LEVEL1_DCACHE_SIZE,   32768,  8, 64 },
+    { 0x30, _SC_LEVEL1_ICACHE_SIZE,   32768,  8, 64 },
+    { 0x39, _SC_LEVEL2_CACHE_SIZE,   131072,  4, 64 },
+    { 0x3a, _SC_LEVEL2_CACHE_SIZE,   196608,  6, 64 },
+    { 0x3b, _SC_LEVEL2_CACHE_SIZE,   131072,  2, 64 },
+    { 0x3c, _SC_LEVEL2_CACHE_SIZE,   262144,  4, 64 },
+    { 0x3d, _SC_LEVEL2_CACHE_SIZE,   393216,  6, 64 },
+    { 0x3e, _SC_LEVEL2_CACHE_SIZE,   524288,  4, 64 },
+    { 0x41, _SC_LEVEL2_CACHE_SIZE,   131072,  4, 32 },
+    { 0x42, _SC_LEVEL2_CACHE_SIZE,   262144,  4, 32 },
+    { 0x43, _SC_LEVEL2_CACHE_SIZE,   524288,  4, 32 },
+    { 0x44, _SC_LEVEL2_CACHE_SIZE,  1048576,  4, 32 },
+    { 0x45, _SC_LEVEL2_CACHE_SIZE,  2097152,  4, 32 },
+    { 0x46, _SC_LEVEL3_CACHE_SIZE,  4194304,  4, 64 },
+    { 0x47, _SC_LEVEL3_CACHE_SIZE,  8388608,  8, 64 },
+    { 0x49, _SC_LEVEL2_CACHE_SIZE,  4194304, 16, 64 },
+    { 0x4a, _SC_LEVEL3_CACHE_SIZE,  6291456, 12, 64 },
+    { 0x4b, _SC_LEVEL3_CACHE_SIZE,  8388608, 16, 64 },
+    { 0x4c, _SC_LEVEL3_CACHE_SIZE, 12582912, 12, 64 },
+    { 0x4d, _SC_LEVEL3_CACHE_SIZE, 16777216, 16, 64 },
+    { 0x60, _SC_LEVEL1_DCACHE_SIZE,   16384,  8, 64 },
+    { 0x66, _SC_LEVEL1_DCACHE_SIZE,    8192,  4, 64 },
+    { 0x67, _SC_LEVEL1_DCACHE_SIZE,   16384,  4, 64 },
+    { 0x68, _SC_LEVEL1_DCACHE_SIZE,   32768,  4, 64 },
+    { 0x78, _SC_LEVEL2_CACHE_SIZE,  1048576,  8, 64 },
+    { 0x79, _SC_LEVEL2_CACHE_SIZE,   131072,  8, 64 },
+    { 0x7a, _SC_LEVEL2_CACHE_SIZE,   262144,  8, 64 },
+    { 0x7b, _SC_LEVEL2_CACHE_SIZE,   524288,  8, 64 },
+    { 0x7c, _SC_LEVEL2_CACHE_SIZE,  1048576,  8, 64 },
+    { 0x7d, _SC_LEVEL2_CACHE_SIZE,  2097152,  8, 64 },
+    { 0x7f, _SC_LEVEL2_CACHE_SIZE,   524288,  2, 64 },
+    { 0x82, _SC_LEVEL2_CACHE_SIZE,   262144,  8, 32 },
+    { 0x83, _SC_LEVEL2_CACHE_SIZE,   524288,  8, 32 },
+    { 0x84, _SC_LEVEL2_CACHE_SIZE,  1048576,  8, 32 },
+    { 0x85, _SC_LEVEL2_CACHE_SIZE,  2097152,  8, 32 },
+    { 0x86, _SC_LEVEL2_CACHE_SIZE,   524288,  4, 64 },
+    { 0x87, _SC_LEVEL2_CACHE_SIZE,  1048576,  8, 64 },
+  };
+
+#define nintel_02_known (sizeof (intel_02_known) / sizeof (intel_02_known [0]))
+
+static int
+intel_02_known_compare (const void *p1, const void *p2)
+{
+  const struct intel_02_cache_info *i1;
+  const struct intel_02_cache_info *i2;
+
+  i1 = (const struct intel_02_cache_info *) p1;
+  i2 = (const struct intel_02_cache_info *) p2;
+
+  if (i1->idx == i2->idx)
+    return 0;
+
+  return i1->idx < i2->idx ? -1 : 1;
+}
+
+
+static long int
+__attribute__ ((noinline))
+intel_check_word (int name, unsigned int value, bool *has_level_2,
+		  bool *no_level_2_or_3)
+{
+  if ((value & 0x80000000) != 0)
+    /* The register value is reserved.  */
+    return 0;
+
+  /* Fold the name.  The _SC_ constants are always in the order SIZE,
+     ASSOC, LINESIZE.  */
+  int folded_name = (_SC_LEVEL1_ICACHE_SIZE
+		     + ((name - _SC_LEVEL1_ICACHE_SIZE) / 3) * 3);
+
+  while (value != 0)
+    {
+      unsigned int byte = value & 0xff;
+
+      if (byte == 0x40)
+	{
+	  *no_level_2_or_3 = true;
+
+	  if (folded_name == _SC_LEVEL3_CACHE_SIZE)
+	    /* No need to look further.  */
+	    break;
+	}
+      else
+	{
+	  if (byte == 0x49 && folded_name == _SC_LEVEL3_CACHE_SIZE)
+	    {
+	      /* Intel reused this value.  For family 15, model 6 it
+		 specifies the 3rd level cache.  Otherwise the 2nd
+		 level cache.  */
+	      unsigned int eax;
+	      unsigned int ebx;
+	      unsigned int ecx;
+	      unsigned int edx;
+	      asm volatile ("cpuid"
+			    : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+			    : "0" (1));
+
+	      unsigned int family = ((eax >> 20) & 0xff) + ((eax >> 8) & 0xf);
+	      unsigned int model = ((((eax >>16) & 0xf) << 4)
+				    + ((eax >> 4) & 0xf));
+	      if (family == 15 && model == 6)
+		{
+		  /* The level 3 cache is encoded for this model like
+		     the level 2 cache is for other models.  Pretend
+		     the caller asked for the level 2 cache.  */
+		  name = (_SC_LEVEL2_CACHE_SIZE
+			  + (name - _SC_LEVEL3_CACHE_SIZE));
+		  folded_name = _SC_LEVEL3_CACHE_SIZE;
+		}
+	    }
+
+	  struct intel_02_cache_info *found;
+	  struct intel_02_cache_info search;
+
+	  search.idx = byte;
+	  found = bsearch (&search, intel_02_known, nintel_02_known,
+			   sizeof (intel_02_known[0]), intel_02_known_compare);
+	  if (found != NULL)
+	    {
+	      if (found->name == folded_name)
+		{
+		  unsigned int offset = name - folded_name;
+
+		  if (offset == 0)
+		    /* Cache size.  */
+		    return found->size;
+		  if (offset == 1)
+		    return found->assoc;
+
+		  assert (offset == 2);
+		  return found->linesize;
+		}
+
+	      if (found->name == _SC_LEVEL2_CACHE_SIZE)
+		*has_level_2 = true;
+	    }
+	}
+
+      /* Next byte for the next round.  */
+      value >>= 8;
+    }
+
+  /* Nothing found.  */
+  return 0;
+}
+
+
+static long int __attribute__ ((noinline))
+handle_intel (int name, unsigned int maxidx)
+{
+  assert (maxidx >= 2);
+
+  /* OK, we can use the CPUID instruction to get all info about the
+     caches.  */
+  unsigned int cnt = 0;
+  unsigned int max = 1;
+  long int result = 0;
+  bool no_level_2_or_3 = false;
+  bool has_level_2 = false;
+
+  while (cnt++ < max)
+    {
+      unsigned int eax;
+      unsigned int ebx;
+      unsigned int ecx;
+      unsigned int edx;
+      asm volatile ("cpuid"
+		    : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+		    : "0" (2));
+
+      /* The low byte of EAX in the first round contain the number of
+	 rounds we have to make.  At least one, the one we are already
+	 doing.  */
+      if (cnt == 1)
+	{
+	  max = eax & 0xff;
+	  eax &= 0xffffff00;
+	}
+
+      /* Process the individual registers' value.  */
+      result = intel_check_word (name, eax, &has_level_2, &no_level_2_or_3);
+      if (result != 0)
+	return result;
+
+      result = intel_check_word (name, ebx, &has_level_2, &no_level_2_or_3);
+      if (result != 0)
+	return result;
+
+      result = intel_check_word (name, ecx, &has_level_2, &no_level_2_or_3);
+      if (result != 0)
+	return result;
+
+      result = intel_check_word (name, edx, &has_level_2, &no_level_2_or_3);
+      if (result != 0)
+	return result;
+    }
+
+  if (name >= _SC_LEVEL2_CACHE_SIZE && name <= _SC_LEVEL3_CACHE_LINESIZE
+      && no_level_2_or_3)
+    return -1;
+
+  return 0;
+}
+
+
+static long int __attribute__ ((noinline))
+handle_amd (int name)
+{
+  unsigned int eax;
+  unsigned int ebx;
+  unsigned int ecx;
+  unsigned int edx;
+  asm volatile ("cpuid"
+		: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+		: "0" (0x80000000));
+
+  if (name >= _SC_LEVEL3_CACHE_SIZE)
+    return 0;
+
+  unsigned int fn = 0x80000005 + (name >= _SC_LEVEL2_CACHE_SIZE);
+  if (eax < fn)
+    return 0;
+
+  asm volatile ("cpuid"
+		: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+		: "0" (fn));
+
+  if (name < _SC_LEVEL1_DCACHE_SIZE)
+    {
+      name += _SC_LEVEL1_DCACHE_SIZE - _SC_LEVEL1_ICACHE_SIZE;
+      ecx = edx;
+    }
+
+  switch (name)
+    {
+    case _SC_LEVEL1_DCACHE_SIZE:
+      return (ecx >> 14) & 0x3fc00;
+    case _SC_LEVEL1_DCACHE_ASSOC:
+      ecx >>= 16;
+      if ((ecx & 0xff) == 0xff)
+	/* Fully associative.  */
+	return (ecx << 2) & 0x3fc00;
+      return ecx & 0xff;
+    case _SC_LEVEL1_DCACHE_LINESIZE:
+      return ecx & 0xff;
+    case _SC_LEVEL2_CACHE_SIZE:
+      return (ecx & 0xf000) == 0 ? 0 : (ecx >> 6) & 0x3fffc00;
+    case _SC_LEVEL2_CACHE_ASSOC:
+      ecx >>= 12;
+      switch (ecx & 0xf)
+        {
+        case 0:
+        case 1:
+        case 2:
+        case 4:
+	  return ecx & 0xf;
+	case 6:
+	  return 8;
+	case 8:
+	  return 16;
+	case 0xf:
+	  return (ecx << 6) & 0x3fffc00;
+	default:
+	  return 0;
+        }
+    case _SC_LEVEL2_CACHE_LINESIZE:
+      return (ecx & 0xf000) == 0 ? 0 : ecx & 0xff;
+    default:
+      assert (! "cannot happen");
+    }
+  return -1;
+}
+
+
+/* Get the value of the system variable NAME.  */
+long int
+attribute_hidden
+__cache_sysconf (int name)
+{
+  /* Find out what brand of processor.  */
+  unsigned int eax;
+  unsigned int ebx;
+  unsigned int ecx;
+  unsigned int edx;
+  asm volatile ("cpuid"
+		: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+		: "0" (0));
+
+  /* This spells out "GenuineIntel".  */
+  if (ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69)
+    return handle_intel (name, eax);
+
+  /* This spells out "AuthenticAMD".  */
+  if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
+    return handle_amd (name);
+
+  // XXX Fill in more vendors.
+
+  /* CPU not known, we have no information.  */
+  return 0;
+}
+
+
+/* Half the core cache size for use in memory and string routines, typically
+   L1 size. */
+long int __x86_64_core_cache_size_half attribute_hidden = 32 * 1024 / 2;
+/* Shared cache size for use in memory and string routines, typically
+   L2 or L3 size. */
+long int __x86_64_shared_cache_size_half attribute_hidden = 1024 * 1024 / 2;
+/* PREFETCHW support flag for use in memory and string routines. */
+int __x86_64_prefetchw attribute_hidden;
+
+
+static void
+__attribute__((constructor))
+init_cacheinfo (void)
+{
+  /* Find out what brand of processor.  */
+  unsigned int eax;
+  unsigned int ebx;
+  unsigned int ecx;
+  unsigned int edx;
+  int max_cpuid;
+  int max_cpuid_ex;
+  long int core = -1;
+  long int shared = -1;
+  unsigned int level;
+  unsigned int threads = 0;
+
+  asm volatile ("cpuid"
+		: "=a" (max_cpuid), "=b" (ebx), "=c" (ecx), "=d" (edx)
+		: "0" (0));
+
+  /* This spells out "GenuineIntel".  */
+  if (ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69)
+    {
+      core = handle_intel (_SC_LEVEL1_DCACHE_SIZE, max_cpuid);
+
+      /* Try L3 first. */
+      level  = 3;
+      shared = handle_intel (_SC_LEVEL3_CACHE_SIZE, max_cpuid);
+
+      if (shared <= 0)
+        {
+	  /* Try L2 otherwise. */
+          level  = 2;
+          shared = handle_intel (_SC_LEVEL2_CACHE_SIZE, max_cpuid);
+	}
+
+      /* Figure out the number of logical threads that share the
+	 highest cache level. */
+      if (max_cpuid >= 4)
+        {
+	  int i = 0;
+
+	  /* Query until desired cache level is enumerated. */
+	  do
+	    {
+              asm volatile ("cpuid"
+		            : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+		            : "0" (4), "2" (i++));
+	    }
+          while (((eax >> 5) & 0x7) != level);
+
+	  threads = ((eax >> 14) & 0x3ff) + 1;
+	}
+      else
+        {
+	  /* Assume that all logical threads share the highest cache level. */
+          asm volatile ("cpuid"
+		        : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+		        : "0" (1));
+
+	  threads = (ebx >> 16) & 0xff;
+	}
+
+      /* Cap usage of highest cache level to the number of supported
+	 threads. */
+      if (shared > 0 && threads > 0)
+        shared /= threads;
+    }
+  /* This spells out "AuthenticAMD".  */
+  else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
+    {
+      core   = handle_amd (_SC_LEVEL1_DCACHE_SIZE);
+      shared = handle_amd (_SC_LEVEL2_CACHE_SIZE);
+
+      asm volatile ("cpuid"
+		    : "=a" (max_cpuid_ex), "=b" (ebx), "=c" (ecx), "=d" (edx)
+		    : "0" (0x80000000));
+
+      if (max_cpuid_ex >= 0x80000001)
+	{
+	  asm volatile ("cpuid"
+			: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+			: "0" (0x80000001));
+	  /*  PREFETCHW     || 3DNow! */
+	  if ((ecx & 0x100) || (edx & 0x80000000))
+	    __x86_64_prefetchw = -1;
+	}
+    }
+
+  if (core > 0)
+    __x86_64_core_cache_size_half = core / 2;
+
+  if (shared > 0)
+    __x86_64_shared_cache_size_half = shared / 2;
+}

Modified: fsf/trunk/libc/sysdeps/x86_64/memcpy.S
==============================================================================
--- fsf/trunk/libc/sysdeps/x86_64/memcpy.S (original)
+++ fsf/trunk/libc/sysdeps/x86_64/memcpy.S Wed May 23 00:03:40 2007
@@ -1,7 +1,10 @@
-/* Highly optimized version for x86-64.
-   Copyright (C) 1997, 2000, 2002, 2003, 2004 Free Software Foundation, Inc.
+/*
+   Optimized memcpy for x86-64.
+
+   Copyright (C) 2007 Free Software Foundation, Inc.
+   Contributed by Evandro Menezes <evandro.menezes@xxxxxxx>, 2007.
+
    This file is part of the GNU C Library.
-   Based on i586 version contributed by Ulrich Drepper <drepper@xxxxxxxxxx>, 1997.
 
    The GNU C Library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Lesser General Public
@@ -16,86 +19,556 @@
    You should have received a copy of the GNU Lesser General Public
    License along with the GNU C Library; if not, write to the Free
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
-   02111-1307 USA.  */
+   02111-1307 USA.
+*/
 
 #include <sysdep.h>
 #include "asm-syntax.h"
-#include "bp-sym.h"
-#include "bp-asm.h"
-
-/* BEWARE: `#ifdef memcpy' means that memcpy is redefined as `mempcpy',
-   and the return value is the byte after the last one copied in
-   the destination. */
-#define MEMPCPY_P (defined memcpy)
+
+/* Stack slots in the red-zone. */
+
+#ifdef USE_AS_MEMPCPY
+#  define RETVAL	(0)
+#else
+#  define RETVAL	(-8)
+#endif
+#define SAVE0	(RETVAL - 8)
+#define SAVE1	(SAVE0	- 8)
+#define SAVE2	(SAVE1	- 8)
+#define SAVE3	(SAVE2	- 8)
 
         .text
+
 #if defined PIC && !defined NOT_IN_libc
 ENTRY (__memcpy_chk)
+
 	cmpq	%rdx, %rcx
 	jb	HIDDEN_JUMPTARGET (__chk_fail)
+
 END (__memcpy_chk)
 #endif
-ENTRY (BP_SYM (memcpy))
-	/* Cutoff for the big loop is a size of 32 bytes since otherwise
-	   the loop will never be entered.  */
+
+ENTRY(memcpy)				/* (void *, const void*, size_t) */
+
+/* Handle tiny blocks. */
+
+L(1try):				/* up to 32B */
 	cmpq	$32, %rdx
-	movq	%rdx, %rcx
-#if !MEMPCPY_P
-	movq	%rdi, %r10	/* Save value. */
-#endif
-
-	/* We need this in any case.  */
-	cld
-
-	jbe	1f
-
-	/* Align destination.  */
-	movq	%rdi, %rax
-	negq	%rax
-	andq	$7, %rax
-	subq	%rax, %rcx
-	xchgq	%rax, %rcx
-
-	rep; movsb
-
-	movq	%rax, %rcx
-	subq	$32, %rcx
-	js	2f
-
-	.p2align 4
-3:
-
-	/* Now correct the loop counter.  Please note that in the following
-	   code the flags are not changed anymore.  */
-	subq	$32, %rcx
+#ifndef USE_AS_MEMPCPY
+	movq	%rdi, %rax		/* save return value */
+#endif
+	jae	L(1after)
+
+L(1):					/* 1-byte once */
+	testb	$1, %dl
+	jz	L(1a)
+
+	movzbl	(%rsi),	%ecx
+	movb	%cl, (%rdi)
+
+	incq	%rsi
+	incq	%rdi
+
+	.p2align 4,, 4
+
+L(1a):					/* 2-byte once */
+	testb	$2, %dl
+	jz	L(1b)
+
+	movzwl	(%rsi),	%ecx
+	movw	%cx, (%rdi)
+
+	addq	$2, %rsi
+	addq	$2, %rdi
+
+	.p2align 4,, 4
+
+L(1b):					/* 4-byte once */
+	testb	$4, %dl
+	jz	L(1c)
+
+	movl	(%rsi),	%ecx
+	movl	%ecx, (%rdi)
+
+	addq	$4, %rsi
+	addq	$4, %rdi
+
+	.p2align 4,, 4
+
+L(1c):					/* 8-byte once */
+	testb	$8, %dl
+	jz	L(1d)
+
+	movq	(%rsi), %rcx
+	movq	%rcx, (%rdi)
+
+	addq	$8, %rsi
+	addq	$8, %rdi
+
+	.p2align 4,, 4
+
+L(1d):					/* 16-byte loop */
+	andl	$0xf0, %edx
+	jz	L(exit)
+
+	.p2align 4
+
+L(1loop):
+	movq	  (%rsi), %rcx
+	movq	8 (%rsi), %r8
+	movq	%rcx,   (%rdi)
+	movq	 %r8, 8 (%rdi)
+
+	subl	$16, %edx
+
+	leaq	16 (%rsi), %rsi
+	leaq	16 (%rdi), %rdi
+
+	jnz	L(1loop)
+
+	.p2align 4,, 4
+
+L(exit):				/* exit */
+#ifdef USE_AS_MEMPCPY
+	movq	%rdi, %rax		/* return value */
+#else
+	rep
+#endif
+	retq
+
+	.p2align 4
+
+L(1after):
+#ifndef USE_AS_MEMPCPY
+	movq	%rax, RETVAL (%rsp)	/* save return value */
+#endif
+
+/* Align to the natural word size. */
+
+L(aligntry):
+	movl	%esi, %ecx      	/* align by destination */
+
+	andl	$7, %ecx
+	jz	L(alignafter)  		/* already aligned */
+
+L(align):		      		/* align */
+	leaq	-8 (%rcx, %rdx), %rdx	/* calculate remaining bytes */
+	subl	$8, %ecx
+
+	.p2align 4
+
+L(alignloop):				/* 1-byte alignment loop */
+	movzbl	(%rsi), %eax
+	movb	%al, (%rdi)
+
+	incl	%ecx
+
+	leaq	1 (%rsi), %rsi
+	leaq	1 (%rdi), %rdi
+
+	jnz	L(alignloop)
+
+	.p2align 4
+
+L(alignafter):
+
+/* Loop to handle mid-sized blocks. */
+
+L(32try):				/* up to 1KB */
+	cmpq	$1024, %rdx
+	ja	L(32after)
+
+L(32):					/* 32-byte loop */
+	movl	%edx, %ecx
+	shrl	$5, %ecx
+	jz	L(32skip)
+
+	.p2align 4
+
+L(32loop):
+	decl	%ecx
 
 	movq	(%rsi), %rax
-	movq	8(%rsi), %rdx
-	movq	16(%rsi), %r8
-	movq	24(%rsi), %r9
+	movq	 8 (%rsi), %r8
+	movq	16 (%rsi), %r9
+	movq	24 (%rsi), %r10
+
 	movq	%rax, (%rdi)
-	movq	%rdx, 8(%rdi)
-	movq	%r8, 16(%rdi)
-	movq	%r9, 24(%rdi)
+	movq	 %r8,  8 (%rdi)
+	movq	 %r9, 16 (%rdi)
+	movq	%r10, 24 (%rdi)
 
 	leaq	32(%rsi), %rsi
 	leaq	32(%rdi), %rdi
 
-	jns	3b
-
-	/* Correct extra loop counter modification.  */
-2:	addq	$32, %rcx
-1:	rep; movsb
-
-#if MEMPCPY_P
-	movq	%rdi, %rax		/* Set return value.  */
-#else
-	movq	%r10, %rax		/* Set return value.  */
+	jz	L(32skip)		/* help out smaller blocks */
+
+	decl	%ecx
+
+	movq	   (%rsi), %rax
+	movq	 8 (%rsi), %r8
+	movq	16 (%rsi), %r9
+	movq	24 (%rsi), %r10
+
+	movq	%rax,    (%rdi)
+	movq	 %r8,  8 (%rdi)
+	movq	 %r9, 16 (%rdi)
+	movq	%r10, 24 (%rdi)
+
+	leaq	32 (%rsi), %rsi
+	leaq	32 (%rdi), %rdi
+
+	jnz	L(32loop)
+
+	.p2align 4
+
+L(32skip):
+	andl	$31, %edx		/* check for left overs */
+#ifdef USE_AS_MEMPCPY
+	jnz	L(1)
+
+	movq	%rdi, %rax
+#else
+	movq	RETVAL (%rsp), %rax
+	jnz	L(1)
 	
-#endif
-	ret
-
-END (BP_SYM (memcpy))
-#if !MEMPCPY_P
+	rep
+#endif
+	retq				/* exit */
+
+	.p2align 4
+
+L(32after):
+
+/*
+	In order to minimize code-size in RTLD, algorithms specific for
+	larger blocks are excluded when building for RTLD.
+*/
+
+/* Handle large blocks smaller than 1/2 L1. */
+
+L(fasttry):				/* first 1/2 L1 */
+#ifndef NOT_IN_libc			/* only up to this algorithm outside of libc.so */
+	movq	__x86_64_core_cache_size_half (%rip), %r11
+	cmpq	%rdx, %r11		/* calculate the smaller of */
+	cmovaq	%rdx, %r11		/* remaining bytes and 1/2 L1 */
+#endif
+
+L(fast):				/* good ol' MOVS */
+#ifndef NOT_IN_libc
+	movq	%r11, %rcx
+	andq	$-8, %r11
+#else
+	movq	%rdx, %rcx
+#endif
+	shrq	$3, %rcx
+	jz	L(fastskip)
+
+	rep
+	movsq
+
+	.p2align 4,, 4
+
+L(fastskip):
+#ifndef NOT_IN_libc
+	subq	%r11, %rdx		/* check for more */
+	testq	$-8, %rdx
+	jnz	L(fastafter)
+#endif
+
+	andl	$7, %edx		/* check for left overs */
+#ifdef USE_AS_MEMPCPY
+	jnz	L(1)
+
+	movq	%rdi, %rax
+#else
+	movq	RETVAL (%rsp), %rax
+	jnz	L(1)
+
+	rep
+#endif
+	retq				/* exit */
+
+#ifndef NOT_IN_libc			/* none of the algorithms below for RTLD */
+
+	.p2align 4
+
+L(fastafter):
+
+/* Handle large blocks smaller than 1/2 L2. */
+
+L(pretry):				/* first 1/2 L2 */
+	movq	__x86_64_shared_cache_size_half (%rip), %r8
+	cmpq	%rdx, %r8		/* calculate the lesser of */
+	cmovaq	%rdx, %r8		/* remaining bytes and 1/2 L2 */
+
+L(pre):					/* 64-byte with prefetching */
+	movq	%r8, %rcx
+	andq	$-64, %r8
+	shrq	$6, %rcx
+	jz	L(preskip)
+
+	movq	%r14, SAVE0 (%rsp)
+	cfi_rel_offset (%r14, SAVE0)
+	movq	%r13, SAVE1 (%rsp)
+	cfi_rel_offset (%r13, SAVE1)
+	movq	%r12, SAVE2 (%rsp)
+	cfi_rel_offset (%r12, SAVE2)
+	movq	%rbx, SAVE3 (%rsp)
+	cfi_rel_offset (%rbx, SAVE3)
+
+	cmpl	$0, __x86_64_prefetchw (%rip)
+	jz	L(preloop)		/* check if PREFETCHW OK */
+
+	.p2align 4
+
+/* ... when PREFETCHW is available (less cache-probe traffic in MP systems). */
+
+L(prewloop):				/* cache-line in state M */
+	decq	%rcx
+
+	movq	   (%rsi), %rax
+	movq	 8 (%rsi), %rbx
+	movq	16 (%rsi), %r9
+	movq	24 (%rsi), %r10
+	movq	32 (%rsi), %r11
+	movq	40 (%rsi), %r12
+	movq	48 (%rsi), %r13
+	movq	56 (%rsi), %r14
+
+	prefetcht0	 0 + 896 (%rsi)
+	prefetcht0	64 + 896 (%rsi)
+
+	movq	%rax,    (%rdi)
+	movq	%rbx,  8 (%rdi)
+	movq	 %r9, 16 (%rdi)
+	movq	%r10, 24 (%rdi)
+	movq	%r11, 32 (%rdi)
+	movq	%r12, 40 (%rdi)
+	movq	%r13, 48 (%rdi)
+	movq	%r14, 56 (%rdi)
+
+	leaq	64 (%rsi), %rsi
+	leaq	64 (%rdi), %rdi
+
+	jz	L(prebail)
+
+	decq	%rcx
+
+	movq	   (%rsi), %rax
+	movq	 8 (%rsi), %rbx
+	movq	16 (%rsi), %r9
+	movq	24 (%rsi), %r10
+	movq	32 (%rsi), %r11
+	movq	40 (%rsi), %r12
+	movq	48 (%rsi), %r13
+	movq	56 (%rsi), %r14
+
+	movq	%rax,    (%rdi)
+	movq	%rbx,  8 (%rdi)
+	movq	 %r9, 16 (%rdi)
+	movq	%r10, 24 (%rdi)
+	movq	%r11, 32 (%rdi)
+	movq	%r12, 40 (%rdi)
+	movq	%r13, 48 (%rdi)
+	movq	%r14, 56 (%rdi)
+
+	prefetchw	896 - 64 (%rdi)
+	prefetchw	896 -  0 (%rdi)
+
+	leaq	64 (%rsi), %rsi
+	leaq	64 (%rdi), %rdi
+
+	jnz	L(prewloop)
+	jmp	L(prebail)
+
+	.p2align 4
+
+/* ... when PREFETCHW is not available. */
+
+L(preloop):				/* cache-line in state E */
+	decq	%rcx
+
+	movq	   (%rsi), %rax
+	movq	 8 (%rsi), %rbx
+	movq	16 (%rsi), %r9
+	movq	24 (%rsi), %r10
+	movq	32 (%rsi), %r11
+	movq	40 (%rsi), %r12
+	movq	48 (%rsi), %r13
+	movq	56 (%rsi), %r14
+
+	prefetcht0	896 +  0 (%rsi)
+	prefetcht0	896 + 64 (%rsi)
+
+	movq	%rax,    (%rdi)
+	movq	%rbx,  8 (%rdi)
+	movq	 %r9, 16 (%rdi)
+	movq	%r10, 24 (%rdi)
+	movq	%r11, 32 (%rdi)
+	movq	%r12, 40 (%rdi)
+	movq	%r13, 48 (%rdi)
+	movq	%r14, 56 (%rdi)
+
+	leaq	64 (%rsi), %rsi
+	leaq	64 (%rdi), %rdi
+
+	jz	L(prebail)
+
+	decq	%rcx
+
+	movq	   (%rsi), %rax
+	movq	 8 (%rsi), %rbx
+	movq	16 (%rsi), %r9
+	movq	24 (%rsi), %r10
+	movq	32 (%rsi), %r11
+	movq	40 (%rsi), %r12
+	movq	48 (%rsi), %r13
+	movq	56 (%rsi), %r14
+
+	prefetcht0	896 - 64 (%rdi)
+	prefetcht0	896 -  0 (%rdi)
+
+	movq	%rax,    (%rdi)
+	movq	%rbx,  8 (%rdi)
+	movq	 %r9, 16 (%rdi)
+	movq	%r10, 24 (%rdi)
+	movq	%r11, 32 (%rdi)
+	movq	%r12, 40 (%rdi)
+	movq	%r13, 48 (%rdi)
+	movq	%r14, 56 (%rdi)
+
+	leaq	64 (%rsi), %rsi
+	leaq	64 (%rdi), %rdi
+
+	jnz	L(preloop)
+
+L(prebail):
+	movq	SAVE3 (%rsp), %rbx
+	cfi_restore (%rbx)
+	movq	SAVE2 (%rsp), %r12
+	cfi_restore (%r12)
+	movq	SAVE1 (%rsp), %r13
+	cfi_restore (%r13)
+	movq	SAVE0 (%rsp), %r14
+	cfi_restore (%r14)
+
+/*       .p2align 4 */
+
+L(preskip):
+	subq	%r8, %rdx		/* check for more */
+	testq	$-64, %rdx
+	jnz	L(preafter)
+
+	andl	$63, %edx		/* check for left overs */
+#ifdef USE_AS_MEMPCPY
+	jnz	L(1)
+
+	movq	%rdi, %rax
+#else
+	movq	RETVAL (%rsp), %rax
+	jnz	L(1)
+
+	rep
+#endif
+	retq				/* exit */
+
+	.p2align 4
+
+L(preafter):
+
+/* Loop to handle huge blocks. */
+
+L(NTtry):
+
+L(NT):					/* non-temporal 128-byte */
+	movq	%rdx, %rcx
+	shrq	$7, %rcx
+	jz	L(NTskip)
+
+	movq	%r14, SAVE0 (%rsp)
+	cfi_rel_offset (%r14, SAVE0)
+	movq	%r13, SAVE1 (%rsp)
+	cfi_rel_offset (%r13, SAVE1)
+	movq	%r12, SAVE2 (%rsp)
+	cfi_rel_offset (%r12, SAVE2)
+
+       .p2align 4
+
+L(NTloop):
+	prefetchnta	768 (%rsi)
+	prefetchnta	832 (%rsi)
+
+	decq	%rcx
+
+	movq	   (%rsi), %rax
+	movq	 8 (%rsi), %r8
+	movq	16 (%rsi), %r9
+	movq	24 (%rsi), %r10
+	movq	32 (%rsi), %r11
+	movq	40 (%rsi), %r12
+	movq	48 (%rsi), %r13
+	movq	56 (%rsi), %r14
+
+	movntiq	%rax,    (%rdi)
+	movntiq	 %r8,  8 (%rdi)
+	movntiq	 %r9, 16 (%rdi)
+	movntiq	%r10, 24 (%rdi)
+	movntiq	%r11, 32 (%rdi)
+	movntiq	%r12, 40 (%rdi)
+	movntiq	%r13, 48 (%rdi)
+	movntiq	%r14, 56 (%rdi)
+
+	movq	 64 (%rsi), %rax
+	movq	 72 (%rsi), %r8
+	movq	 80 (%rsi), %r9
+	movq	 88 (%rsi), %r10
+	movq	 96 (%rsi), %r11
+	movq	104 (%rsi), %r12
+	movq	112 (%rsi), %r13
+	movq	120 (%rsi), %r14
+
+	movntiq	%rax,  64 (%rdi)
+	movntiq	 %r8,  72 (%rdi)
+	movntiq	 %r9,  80 (%rdi)
+	movntiq	%r10,  88 (%rdi)
+	movntiq	%r11,  96 (%rdi)
+	movntiq	%r12, 104 (%rdi)
+	movntiq	%r13, 112 (%rdi)
+	movntiq	%r14, 120 (%rdi)
+
+	leaq	128 (%rsi), %rsi
+	leaq	128 (%rdi), %rdi
+
+	jnz	L(NTloop)
+
+	sfence				/* serialize memory stores */
+
+	movq	SAVE2 (%rsp), %r12
+	cfi_restore (%r12)
+	movq	SAVE1 (%rsp), %r13
+	cfi_restore (%r13)
+	movq	SAVE0 (%rsp), %r14
+	cfi_restore (%r14)
+
+L(NTskip):
+	andl	$127, %edx		/* check for left overs */
+#ifdef USE_AS_MEMPCPY
+	jnz	L(1)
+
+	movq	%rdi, %rax
+#else
+	movq	RETVAL (%rsp), %rax
+	jnz	L(1)
+
+	rep
+#endif
+	retq				/* exit */
+
+#endif /* !NOT_IN_libc */
+
+END(memcpy)
+
+#ifndef USE_AS_MEMPCPY
 libc_hidden_builtin_def (memcpy)
 #endif

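The rewrite above dispatches on block size: blocks under 32 bytes use scalar moves, blocks up to 1 KB a 32-byte unrolled loop, the first half-L1's worth of anything larger rep movsq, the next portion (up to half the shared L2/L3 size) a 64-byte loop with prefetcht0 or prefetchw, and whatever remains 128-byte non-temporal movntiq stores followed by sfence; inside rtld only the first tiers are built. As an illustration of that last tier only, a C sketch using the equivalent SSE2 intrinsics (assumes x86-64 with GCC or Clang; illustrative, not part of the patch):

#include <emmintrin.h>   /* _mm_stream_si64, _mm_sfence */
#include <stddef.h>
#include <string.h>

/* Copy N bytes with non-temporal 8-byte stores, as the huge-block tier of
   the new memcpy does with movntiq.  */
static void
copy_nontemporal (void *dst, const void *src, size_t n)
{
  long long *d = dst;
  const long long *s = src;

  while (n >= 8)
    {
      long long v;
      memcpy (&v, s, 8);              /* unaligned- and aliasing-safe load */
      _mm_stream_si64 (d, v);         /* movnti: store bypassing the caches */
      d++; s++; n -= 8;
    }

  _mm_sfence ();                      /* order the streaming stores */

  if (n)
    memcpy (d, s, n);                 /* copy the tail normally */
}
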
Modified: fsf/trunk/libc/sysdeps/x86_64/mempcpy.S
==============================================================================
--- fsf/trunk/libc/sysdeps/x86_64/mempcpy.S (original)
+++ fsf/trunk/libc/sysdeps/x86_64/mempcpy.S Wed May 23 00:03:40 2007
@@ -1,3 +1,4 @@
+#define USE_AS_MEMPCPY
 #define memcpy __mempcpy
 #define __memcpy_chk __mempcpy_chk
 #include <sysdeps/x86_64/memcpy.S>
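
mempcpy is assembled from the same source with USE_AS_MEMPCPY defined; the only visible difference from memcpy is the return value, dest + n instead of dest. A quick reminder of that contract (illustrative, not part of the patch):

#define _GNU_SOURCE      /* mempcpy is a GNU extension */
#include <stdio.h>
#include <string.h>

int
main (void)
{
  char buf[16];

  char *end = mempcpy (buf, "abc", 3);   /* returns buf + 3 */
  end = mempcpy (end, "def", 4);         /* convenient for appending; copies the NUL */

  printf ("%s\n", buf);                  /* -> abcdef */
  printf ("memcpy returns dst: %d\n", memcpy (buf, "x", 1) == (void *) buf);
  return 0;
}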