[commits] r11869 - in /fsf/trunk/libc: ./ elf/ include/ malloc/ sysdeps/generic/ sysdeps/powerpc/ sysdeps/powerpc/powerpc32/a2/ sysdep...



Author: eglibc
Date: Mon Oct 25 00:03:15 2010
New Revision: 11869

Log:
Import glibc-mainline for 2010-10-25

Added:
    fsf/trunk/libc/sysdeps/powerpc/powerpc32/a2/
    fsf/trunk/libc/sysdeps/powerpc/powerpc32/a2/memcpy.S
    fsf/trunk/libc/sysdeps/powerpc/powerpc64/a2/
    fsf/trunk/libc/sysdeps/powerpc/powerpc64/a2/memcpy.S
    fsf/trunk/libc/sysdeps/unix/sysv/linux/powerpc/powerpc32/a2/
    fsf/trunk/libc/sysdeps/unix/sysv/linux/powerpc/powerpc32/a2/Implies
    fsf/trunk/libc/sysdeps/unix/sysv/linux/powerpc/powerpc64/a2/
    fsf/trunk/libc/sysdeps/unix/sysv/linux/powerpc/powerpc64/a2/Implies
Modified:
    fsf/trunk/libc/ChangeLog
    fsf/trunk/libc/Makeconfig
    fsf/trunk/libc/NEWS
    fsf/trunk/libc/elf/dl-deps.c
    fsf/trunk/libc/elf/dl-load.c
    fsf/trunk/libc/elf/dl-open.c
    fsf/trunk/libc/elf/rtld-Rules
    fsf/trunk/libc/elf/rtld.c
    fsf/trunk/libc/include/dlfcn.h
    fsf/trunk/libc/malloc/malloc.c
    fsf/trunk/libc/sysdeps/generic/ldsodefs.h
    fsf/trunk/libc/sysdeps/powerpc/dl-procinfo.c
    fsf/trunk/libc/sysdeps/powerpc/dl-procinfo.h

Modified: fsf/trunk/libc/ChangeLog
==============================================================================
--- fsf/trunk/libc/ChangeLog (original)
+++ fsf/trunk/libc/ChangeLog Mon Oct 25 00:03:15 2010
@@ -1,3 +1,46 @@
+2010-10-24  Ulrich Drepper  <drepper@xxxxxxxxxx>
+
+	[BZ #12140]
+	* malloc/malloc.c (_int_free): Fill correct number of bytes when
+	perturbing.
+
+2010-10-20  Michael B. Brutman  <brutman@xxxxxxxxxx>
+
+	* sysdeps/powerpc/dl-procinfo.c: Add support for ppca2 platform
+	* sysdeps/powerpc/dl-procinfo.h: Add support for ppca2 platform
+	* sysdeps/powerpc/powerpc32/a2/memcpy.S: New file.
+	* sysdeps/powerpc/powerpc64/a2/memcpy.S: Likewise.
+	* sysdeps/unix/sysv/linux/powerpc/powerpc32/a2/Implies: New
+	submachine.
+	* sysdeps/unix/sysv/linux/powerpc/powerpc64/a2/Implies: Likewise.
+
+2010-10-22  Andreas Schwab  <schwab@xxxxxxxxxx>
+
+	* include/dlfcn.h (__RTLD_SECURE): Define.
+	* elf/dl-load.c (_dl_map_object): Remove preloaded parameter.  Use
+	mode & __RTLD_SECURE instead.
+	(open_path): Rename preloaded parameter to secure.
+	* sysdeps/generic/ldsodefs.h (_dl_map_object): Adjust declaration.
+	* elf/dl-open.c (dl_open_worker): Adjust call to _dl_map_object.
+	* elf/dl-deps.c (openaux): Likewise.
+	* elf/rtld.c (struct map_args): Remove is_preloaded.
+	(map_doit): Don't use it.
+	(dl_main): Likewise.
+	(do_preload): Use __RTLD_SECURE instead of is_preloaded.
+	(dlmopen_doit): Add __RTLD_SECURE to mode bits.
+
+2010-09-09  Andreas Schwab  <schwab@xxxxxxxxxx>
+
+	* Makeconfig (sysd-rules-patterns): Add rtld-%:rtld-%.
+	(sysd-rules-targets): Remove duplicates.
+	* elf/rtld-Rules ($(objpfx)rtld-%.os): Add pattern rules with
+	rtld-%.$o dependency.
+
+2010-10-18  Andreas Schwab  <schwab@xxxxxxxxxx>
+
+	* elf/dl-open.c (dl_open_worker): Don't expand DST here, let
+	_dl_map_object do it.
+
 2010-10-19  Ulrich Drepper  <drepper@xxxxxxxxx>
 
 	* sysdeps/i386/bits/mathdef.h (FP_FAST_FMA): If the GCC 4.6 port has

Modified: fsf/trunk/libc/Makeconfig
==============================================================================
--- fsf/trunk/libc/Makeconfig (original)
+++ fsf/trunk/libc/Makeconfig Mon Oct 25 00:03:15 2010
@@ -695,7 +695,7 @@
 	   -include $(..)include/libc-symbols.h $(sysdep-CPPFLAGS) \
 	   $(CPPFLAGS-$(suffix $@)) \
 	   $(foreach lib,$(libof-$(basename $(@F))) \
-		         $(libof-$(<F)) $(libof-$(@F)),$(CPPFLAGS-$(lib))) \
+			 $(libof-$(<F)) $(libof-$(@F)),$(CPPFLAGS-$(lib))) \
 	   $(CPPFLAGS-$(<F)) $(CPPFLAGS-$(@F)) $(CPPFLAGS-$(basename $(@F)))
 override CFLAGS	= -std=gnu99 $(gnu89-inline-CFLAGS) \
 		  $(filter-out %frame-pointer,$(+cflags)) $(+gccwarn-c) \
@@ -966,7 +966,7 @@
 # emitted into sysd-rules.  A sysdeps Makeconfig fragment can
 # add its own special object file prefix to this list with e.g. foo-%:%
 # to have foo-*.? compiled from *.? using $(foo-CPPFLAGS).
-sysd-rules-patterns := %:% rtld-%:% m_%:s_%
+sysd-rules-patterns := %:% rtld-%:rtld-% rtld-%:% m_%:s_%
 
 # Let sysdeps/ subdirs contain a Makeconfig fragment for us to include here.
 sysdep-makeconfigs := $(wildcard $(+sysdep_dirs:=/Makeconfig))
@@ -975,8 +975,8 @@
 endif
 
 # Compute just the target patterns.  Makeconfig has set sysd-rules-patterns.
-sysd-rules-targets := $(foreach p,$(sysd-rules-patterns),\
-		      		$(firstword $(subst :, ,$p)))
+sysd-rules-targets := $(sort $(foreach p,$(sysd-rules-patterns),\
+					 $(firstword $(subst :, ,$p))))
 
 endif # Makeconfig not yet included
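
Note: the two Makeconfig hunks above work together.  Adding the
rtld-%:rtld-% pattern teaches the sysd-rules machinery to build
rtld-foo.os directly from a source file that is itself named
rtld-foo.*, and wrapping sysd-rules-targets in $(sort ...) relies on
GNU make's $(sort), which removes duplicate words as well as sorting,
so the rtld-% target pattern no longer appears in the list twice.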
 

Modified: fsf/trunk/libc/NEWS
==============================================================================
--- fsf/trunk/libc/NEWS (original)
+++ fsf/trunk/libc/NEWS Mon Oct 25 00:03:15 2010
@@ -1,4 +1,4 @@
-GNU C Library NEWS -- history of user-visible changes.  2010-10-13
+GNU C Library NEWS -- history of user-visible changes.  2010-10-24
 Copyright (C) 1992-2009, 2010 Free Software Foundation, Inc.
 See the end for copying conditions.
 
@@ -11,7 +11,7 @@
 
   3268, 7066, 10851, 11611, 11640, 11701, 11840, 11856, 11883, 11903, 11904,
   11968, 11979, 12005, 12037, 12067, 12077, 12078, 12092, 12093, 12107, 12108,
-  12113
+  12113, 12140
 
 * New Linux interfaces: prlimit, prlimit64, fanotify_init, fanotify_mark
 

Modified: fsf/trunk/libc/elf/dl-deps.c
==============================================================================
--- fsf/trunk/libc/elf/dl-deps.c (original)
+++ fsf/trunk/libc/elf/dl-deps.c Mon Oct 25 00:03:15 2010
@@ -62,7 +62,7 @@
 {
   struct openaux_args *args = (struct openaux_args *) a;
 
-  args->aux = _dl_map_object (args->map, args->name, 0,
+  args->aux = _dl_map_object (args->map, args->name,
 			      (args->map->l_type == lt_executable
 			       ? lt_library : args->map->l_type),
 			      args->trace_mode, args->open_mode,

Modified: fsf/trunk/libc/elf/dl-load.c
==============================================================================
--- fsf/trunk/libc/elf/dl-load.c (original)
+++ fsf/trunk/libc/elf/dl-load.c Mon Oct 25 00:03:15 2010
@@ -1812,7 +1812,7 @@
    if MAY_FREE_DIRS is true.  */
 
 static int
-open_path (const char *name, size_t namelen, int preloaded,
+open_path (const char *name, size_t namelen, int secure,
 	   struct r_search_path_struct *sps, char **realname,
 	   struct filebuf *fbp, struct link_map *loader, int whatcode,
 	   bool *found_other_class)
@@ -1894,7 +1894,7 @@
 	  /* Remember whether we found any existing directory.  */
 	  here_any |= this_dir->status[cnt] != nonexisting;
 
-	  if (fd != -1 && __builtin_expect (preloaded, 0)
+	  if (fd != -1 && __builtin_expect (secure, 0)
 	      && INTUSE(__libc_enable_secure))
 	    {
 	      /* This is an extra security effort to make sure nobody can
@@ -1963,7 +1963,7 @@
 
 struct link_map *
 internal_function
-_dl_map_object (struct link_map *loader, const char *name, int preloaded,
+_dl_map_object (struct link_map *loader, const char *name,
 		int type, int trace_mode, int mode, Lmid_t nsid)
 {
   int fd;
@@ -2067,7 +2067,8 @@
 	  for (l = loader; l; l = l->l_loader)
 	    if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
 	      {
-		fd = open_path (name, namelen, preloaded, &l->l_rpath_dirs,
+		fd = open_path (name, namelen, mode & __RTLD_SECURE,
+				&l->l_rpath_dirs,
 				&realname, &fb, loader, LA_SER_RUNPATH,
 				&found_other_class);
 		if (fd != -1)
@@ -2082,14 +2083,15 @@
 	      && main_map != NULL && main_map->l_type != lt_loaded
 	      && cache_rpath (main_map, &main_map->l_rpath_dirs, DT_RPATH,
 			      "RPATH"))
-	    fd = open_path (name, namelen, preloaded, &main_map->l_rpath_dirs,
+	    fd = open_path (name, namelen, mode & __RTLD_SECURE,
+			    &main_map->l_rpath_dirs,
 			    &realname, &fb, loader ?: main_map, LA_SER_RUNPATH,
 			    &found_other_class);
 	}
 
       /* Try the LD_LIBRARY_PATH environment variable.  */
       if (fd == -1 && env_path_list.dirs != (void *) -1)
-	fd = open_path (name, namelen, preloaded, &env_path_list,
+	fd = open_path (name, namelen, mode & __RTLD_SECURE, &env_path_list,
 			&realname, &fb,
 			loader ?: GL(dl_ns)[LM_ID_BASE]._ns_loaded,
 			LA_SER_LIBPATH, &found_other_class);
@@ -2098,12 +2100,12 @@
       if (fd == -1 && loader != NULL
 	  && cache_rpath (loader, &loader->l_runpath_dirs,
 			  DT_RUNPATH, "RUNPATH"))
-	fd = open_path (name, namelen, preloaded,
+	fd = open_path (name, namelen, mode & __RTLD_SECURE,
 			&loader->l_runpath_dirs, &realname, &fb, loader,
 			LA_SER_RUNPATH, &found_other_class);
 
       if (fd == -1
-	  && (__builtin_expect (! preloaded, 1)
+	  && (__builtin_expect (! (mode & __RTLD_SECURE), 1)
 	      || ! INTUSE(__libc_enable_secure)))
 	{
 	  /* Check the list of libraries in the file /etc/ld.so.cache,
@@ -2169,7 +2171,7 @@
 	  && ((l = loader ?: GL(dl_ns)[nsid]._ns_loaded) == NULL
 	      || __builtin_expect (!(l->l_flags_1 & DF_1_NODEFLIB), 1))
 	  && rtld_search_dirs.dirs != (void *) -1)
-	fd = open_path (name, namelen, preloaded, &rtld_search_dirs,
+	fd = open_path (name, namelen, mode & __RTLD_SECURE, &rtld_search_dirs,
 			&realname, &fb, l, LA_SER_DEFAULT, &found_other_class);
 
       /* Add another newline when we are tracing the library loading.  */
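
Note: taken together, the dl-load.c hunks replace the dedicated
'preloaded' flag with a bit in the existing MODE argument: open_path's
parameter is renamed to 'secure', and each _dl_map_object search path
now tests 'mode & __RTLD_SECURE' before applying the extra checks done
under __libc_enable_secure.  A minimal sketch of this flag-bit pattern
follows the include/dlfcn.h hunk below.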

Modified: fsf/trunk/libc/elf/dl-open.c
==============================================================================
--- fsf/trunk/libc/elf/dl-open.c (original)
+++ fsf/trunk/libc/elf/dl-open.c Mon Oct 25 00:03:15 2010
@@ -221,38 +221,9 @@
 
   assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);
 
-  /* Maybe we have to expand a DST.  */
-  if (__builtin_expect (dst != NULL, 0))
-    {
-      size_t len = strlen (file);
-
-      /* Determine how much space we need.  We have to allocate the
-	 memory locally.  */
-      size_t required = DL_DST_REQUIRED (call_map, file, len,
-					 _dl_dst_count (dst, 0));
-
-      /* Get space for the new file name.  */
-      char *new_file = (char *) alloca (required + 1);
-
-      /* Generate the new file name.  */
-      _dl_dst_substitute (call_map, file, new_file, 0);
-
-      /* If the substitution failed don't try to load.  */
-      if (*new_file == '\0')
-	_dl_signal_error (0, "dlopen", NULL,
-			  N_("empty dynamic string token substitution"));
-
-      /* Now we have a new file name.  */
-      file = new_file;
-
-      /* It does not matter whether call_map is set even if we
-	 computed it only because of the DST.  Since the path contains
-	 a slash the value is not used.  See dl-load.c.  */
-    }
-
   /* Load the named object.  */
   struct link_map *new;
-  args->map = new = _dl_map_object (call_map, file, 0, lt_loaded, 0,
+  args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
 				    mode | __RTLD_CALLMAP, args->nsid);
 
   /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is

Modified: fsf/trunk/libc/elf/rtld-Rules
==============================================================================
--- fsf/trunk/libc/elf/rtld-Rules (original)
+++ fsf/trunk/libc/elf/rtld-Rules Mon Oct 25 00:03:15 2010
@@ -1,6 +1,6 @@
 # Subroutine makefile for compiling libc modules linked into dynamic linker.
 
-# Copyright (C) 2002, 2003, 2005, 2006, 2008 Free Software Foundation, Inc.
+# Copyright (C) 2002,2003,2005,2006,2008,2010 Free Software Foundation, Inc.
 # This file is part of the GNU C Library.
 
 # The GNU C Library is free software; you can redistribute it and/or
@@ -56,7 +56,7 @@
 # Use the verbose option of ar and tar when not running silently.
 ifeq	"$(findstring s,$(MAKEFLAGS))" ""	# if not -s
 verbose := v
-else	   					# -s
+else						# -s
 verbose	:=
 endif						# not -s
 
@@ -93,6 +93,12 @@
 # These are the basic compilation rules corresponding to the Makerules ones.
 # The sysd-rules generated makefile already defines pattern rules for rtld-%
 # targets built from sysdeps source files.
+$(objpfx)rtld-%.os: rtld-%.S $(before-compile)
+	$(compile-command.S) $(rtld-CPPFLAGS)
+$(objpfx)rtld-%.os: rtld-%.s $(before-compile)
+	$(compile-command.s) $(rtld-CPPFLAGS)
+$(objpfx)rtld-%.os: rtld-%.c $(before-compile)
+	$(compile-command.c) $(rtld-CPPFLAGS)
 $(objpfx)rtld-%.os: %.S $(before-compile)
 	$(compile-command.S) $(rtld-CPPFLAGS)
 $(objpfx)rtld-%.os: %.s $(before-compile)
@@ -101,6 +107,9 @@
 	$(compile-command.c) $(rtld-CPPFLAGS)
 
 # The rules for generated source files.
+$(objpfx)rtld-%.os: $(objpfx)rtld-%.S $(before-compile); $(compile-command.S)
+$(objpfx)rtld-%.os: $(objpfx)rtld-%.s $(before-compile); $(compile-command.s)
+$(objpfx)rtld-%.os: $(objpfx)rtld-%.c $(before-compile); $(compile-command.c)
 $(objpfx)rtld-%.os: $(objpfx)%.S $(before-compile); $(compile-command.S)
 $(objpfx)rtld-%.os: $(objpfx)%.s $(before-compile); $(compile-command.s)
 $(objpfx)rtld-%.os: $(objpfx)%.c $(before-compile); $(compile-command.c)
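
Note: the six new pattern rules mirror the existing %.S/%.s/%.c ones
but take sources that already carry the rtld- prefix, matching the
rtld-%:rtld-% pattern added to sysd-rules-patterns in Makeconfig
above; previously there was no rule to build rtld-foo.os from a file
literally named rtld-foo.c.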

Modified: fsf/trunk/libc/elf/rtld.c
==============================================================================
--- fsf/trunk/libc/elf/rtld.c (original)
+++ fsf/trunk/libc/elf/rtld.c Mon Oct 25 00:03:15 2010
@@ -589,7 +589,6 @@
   /* Argument to map_doit.  */
   char *str;
   struct link_map *loader;
-  int is_preloaded;
   int mode;
   /* Return value of map_doit.  */
   struct link_map *map;
@@ -627,16 +626,17 @@
 map_doit (void *a)
 {
   struct map_args *args = (struct map_args *) a;
-  args->map = _dl_map_object (args->loader, args->str,
-			      args->is_preloaded, lt_library, 0, args->mode,
-			      LM_ID_BASE);
+  args->map = _dl_map_object (args->loader, args->str, lt_library, 0,
+			      args->mode, LM_ID_BASE);
 }
 
 static void
 dlmopen_doit (void *a)
 {
   struct dlmopen_args *args = (struct dlmopen_args *) a;
-  args->map = _dl_open (args->fname, RTLD_LAZY | __RTLD_DLOPEN | __RTLD_AUDIT,
+  args->map = _dl_open (args->fname,
+			(RTLD_LAZY | __RTLD_DLOPEN | __RTLD_AUDIT
+			 | __RTLD_SECURE),
 			dl_main, LM_ID_NEWLM, _dl_argc, INTUSE(_dl_argv),
 			__environ);
 }
@@ -806,8 +806,7 @@
 
   args.str = fname;
   args.loader = main_map;
-  args.is_preloaded = 1;
-  args.mode = 0;
+  args.mode = __RTLD_SECURE;
 
   unsigned int old_nloaded = GL(dl_ns)[LM_ID_BASE]._ns_nloaded;
 
@@ -1054,7 +1053,6 @@
 
 	  args.str = rtld_progname;
 	  args.loader = NULL;
-	  args.is_preloaded = 0;
 	  args.mode = __RTLD_OPENEXEC;
 	  (void) _dl_catch_error (&objname, &err_str, &malloced, map_doit,
 				  &args);
@@ -1066,7 +1064,7 @@
       else
 	{
 	  HP_TIMING_NOW (start);
-	  _dl_map_object (NULL, rtld_progname, 0, lt_library, 0,
+	  _dl_map_object (NULL, rtld_progname, lt_library, 0,
 			  __RTLD_OPENEXEC, LM_ID_BASE);
 	  HP_TIMING_NOW (stop);
 

Modified: fsf/trunk/libc/include/dlfcn.h
==============================================================================
--- fsf/trunk/libc/include/dlfcn.h (original)
+++ fsf/trunk/libc/include/dlfcn.h Mon Oct 25 00:03:15 2010
@@ -9,6 +9,7 @@
 #define __RTLD_OPENEXEC	0x20000000
 #define __RTLD_CALLMAP	0x10000000
 #define __RTLD_AUDIT	0x08000000
+#define __RTLD_SECURE	0x04000000 /* Apply additional security checks.  */
 
 #define __LM_ID_CALLER	-2
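
For reference, a minimal C sketch of the flag-bit convention used
here.  The DEMO_* names are illustrative stand-ins for the real
RTLD_*/__RTLD_* macros: internal-only bits sit at the high end of the
mode word so they cannot collide with the public dlopen flags, and a
callee recovers the boolean by masking instead of taking a separate
parameter.

#include <stdio.h>

/* Illustrative values only; the real ones live in <dlfcn.h> and
   include/dlfcn.h.  */
#define DEMO_RTLD_LAZY    0x00000001	/* public flag */
#define DEMO_RTLD_SECURE  0x04000000	/* internal-only bit */

/* Before this patch the security intent travelled as a separate
   'preloaded' argument; afterwards it rides along in MODE.  */
static void
map_object_demo (const char *name, int mode)
{
  int secure = (mode & DEMO_RTLD_SECURE) != 0;
  printf ("%s: secure search %s\n", name, secure ? "on" : "off");
}

int
main (void)
{
  map_object_demo ("libfoo.so", DEMO_RTLD_LAZY);
  map_object_demo ("libpre.so", DEMO_RTLD_LAZY | DEMO_RTLD_SECURE);
  return 0;
}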
 

Modified: fsf/trunk/libc/malloc/malloc.c
==============================================================================
--- fsf/trunk/libc/malloc/malloc.c (original)
+++ fsf/trunk/libc/malloc/malloc.c Mon Oct 25 00:03:15 2010
@@ -4850,7 +4850,7 @@
       }
 
     if (__builtin_expect (perturb_byte, 0))
-      free_perturb (chunk2mem(p), size - SIZE_SZ);
+      free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
 
     set_fastchunks(av);
     unsigned int idx = fastbin_index(size);
@@ -4954,7 +4954,7 @@
       }
 
     if (__builtin_expect (perturb_byte, 0))
-      free_perturb (chunk2mem(p), size - SIZE_SZ);
+      free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
 
     /* consolidate backward */
     if (!prev_inuse(p)) {
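
The arithmetic behind the BZ #12140 fix: chunk2mem(p) points
2 * SIZE_SZ bytes past the chunk header, and a chunk's SIZE counts the
header, so the chunk ends exactly SIZE bytes from p.  Filling
size - SIZE_SZ bytes from chunk2mem(p) therefore spilled one word into
the following chunk; size - 2 * SIZE_SZ ends exactly at the boundary.
A minimal sketch of that arithmetic (illustrative offsets, not the
malloc internals themselves):

#include <assert.h>
#include <stddef.h>

#define SIZE_SZ sizeof (size_t)	/* as in malloc.c */

int
main (void)
{
  size_t p = 0;			/* chunk base, as an offset     */
  size_t size = 64;		/* chunk size, header included  */
  size_t mem = p + 2 * SIZE_SZ;	/* what chunk2mem(p) returns    */

  /* The old fill length ran one word into the next chunk...  */
  assert (mem + (size - SIZE_SZ) == p + size + SIZE_SZ);
  /* ...the corrected length ends exactly at the next chunk.  */
  assert (mem + (size - 2 * SIZE_SZ) == p + size);
  return 0;
}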

Modified: fsf/trunk/libc/sysdeps/generic/ldsodefs.h
==============================================================================
--- fsf/trunk/libc/sysdeps/generic/ldsodefs.h (original)
+++ fsf/trunk/libc/sysdeps/generic/ldsodefs.h Mon Oct 25 00:03:15 2010
@@ -824,11 +824,9 @@
 
 /* Open the shared object NAME and map in its segments.
    LOADER's DT_RPATH is used in searching for NAME.
-   If the object is already opened, returns its existing map.
-   For preloaded shared objects PRELOADED is set to a non-zero
-   value to allow additional security checks.  */
+   If the object is already opened, returns its existing map.  */
 extern struct link_map *_dl_map_object (struct link_map *loader,
-					const char *name, int preloaded,
+					const char *name,
 					int type, int trace_mode, int mode,
 					Lmid_t nsid)
      internal_function attribute_hidden;

Modified: fsf/trunk/libc/sysdeps/powerpc/dl-procinfo.c
==============================================================================
--- fsf/trunk/libc/sysdeps/powerpc/dl-procinfo.c (original)
+++ fsf/trunk/libc/sysdeps/powerpc/dl-procinfo.c Mon Oct 25 00:03:15 2010
@@ -68,7 +68,7 @@
 #if !defined PROCINFO_DECL && defined SHARED
   ._dl_powerpc_platforms
 #else
-PROCINFO_CLASS const char _dl_powerpc_platforms[8][12]
+PROCINFO_CLASS const char _dl_powerpc_platforms[9][12]
 #endif
 #ifndef PROCINFO_DECL
 = {
@@ -79,7 +79,8 @@
     [PPC_PLATFORM_POWER6] = "power6",
     [PPC_PLATFORM_CELL_BE] = "ppc-cell-be",
     [PPC_PLATFORM_POWER6X] = "power6x",
-    [PPC_PLATFORM_POWER7] = "power7"
+    [PPC_PLATFORM_POWER7] = "power7",
+    [PPC_PLATFORM_PPCA2] = "ppca2"
   }
 #endif
 #if !defined SHARED || defined PROCINFO_DECL
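
The platform table gains one slot; the companion count in
dl-procinfo.h (next hunk) must grow in step.  A minimal sketch of the
pattern, with DEMO_* names standing in for the real macros:

#include <stdio.h>

/* Designated initializers give each platform a fixed slot; the array
   bound and the index macros must be kept in sync by hand.  */
#define DEMO_PLATFORMS_COUNT 9
#define DEMO_PLATFORM_POWER7 7
#define DEMO_PLATFORM_PPCA2  8

static const char demo_platforms[DEMO_PLATFORMS_COUNT][12] = {
  [DEMO_PLATFORM_POWER7] = "power7",
  [DEMO_PLATFORM_PPCA2]  = "ppca2",
};

int
main (void)
{
  printf ("%s\n", demo_platforms[DEMO_PLATFORM_PPCA2]);
  return 0;
}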

Modified: fsf/trunk/libc/sysdeps/powerpc/dl-procinfo.h
==============================================================================
--- fsf/trunk/libc/sysdeps/powerpc/dl-procinfo.h (original)
+++ fsf/trunk/libc/sysdeps/powerpc/dl-procinfo.h Mon Oct 25 00:03:15 2010
@@ -31,7 +31,7 @@
 #define HWCAP_IMPORTANT		(PPC_FEATURE_HAS_ALTIVEC \
 				+ PPC_FEATURE_HAS_DFP)
 
-#define _DL_PLATFORMS_COUNT	8
+#define _DL_PLATFORMS_COUNT	9
 
 #define _DL_FIRST_PLATFORM      32
 /* Mask to filter out platforms.  */
@@ -47,6 +47,7 @@
 #define PPC_PLATFORM_CELL_BE		5
 #define PPC_PLATFORM_POWER6X		6
 #define PPC_PLATFORM_POWER7		7
+#define PPC_PLATFORM_PPCA2		8
 
 static inline const char *
 __attribute__ ((unused))
@@ -123,6 +124,10 @@
 		       GLRO(dl_powerpc_platforms)[PPC_PLATFORM_CELL_BE] + 3)
 	       == 0)
 	return _DL_FIRST_PLATFORM + PPC_PLATFORM_CELL_BE;
+      else if (strcmp (str + 3,
+		       GLRO(dl_powerpc_platforms)[PPC_PLATFORM_PPCA2] + 3)
+	       == 0)
+	return _DL_FIRST_PLATFORM + PPC_PLATFORM_PPCA2;
     }
 
   return -1;
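
Note: _DL_PLATFORMS_COUNT must match the bound of
_dl_powerpc_platforms in dl-procinfo.c above, and PPC_PLATFORM_PPCA2
takes the next free index.  In _dl_string_platform the new comparison
starts at str + 3, like the ppc-cell-be one before it, because this
branch is reached only for strings beginning with "ppc"; the shared
three-character prefix is skipped on both sides of the strcmp.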

Added: fsf/trunk/libc/sysdeps/powerpc/powerpc32/a2/memcpy.S
==============================================================================
--- fsf/trunk/libc/sysdeps/powerpc/powerpc32/a2/memcpy.S (added)
+++ fsf/trunk/libc/sysdeps/powerpc/powerpc32/a2/memcpy.S Mon Oct 25 00:03:15 2010
@@ -1,0 +1,511 @@
+/* Optimized memcpy implementation for PowerPC A2.
+   Copyright (C) 2010 Free Software Foundation, Inc.
+   Contributed by Michael Brutman <brutman@xxxxxxxxxx>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+#define PREFETCH_AHEAD 4        /* no cache lines SRC prefetching ahead  */
+#define ZERO_AHEAD 2            /* no cache lines DST zeroing ahead  */
+
+	.machine  a2
+EALIGN (BP_SYM (memcpy), 5, 0)
+	CALL_MCOUNT
+
+	dcbt    0,r4            /* Prefetch ONE SRC cacheline  */
+	cmplwi  cr1,r5,16       /* is size < 16 ?  */
+	mr      r6,r3           /* Copy dest reg to r6; */
+	blt+    cr1,L(shortcopy)
+
+
+	/* Big copy (16 bytes or more)
+
+	   Figure out how far to the nearest quadword boundary, or if we are
+	   on one already.
+
+	   r3 - return value (always)
+	   r4 - current source addr
+	   r5 - copy length
+	   r6 - current dest addr
+	*/
+
+	neg     r8,r3           /* LS 4 bits = # bytes to 8-byte dest bdry  */
+	clrlwi  r8,r8,32-4      /* align to 16byte boundary  */
+	sub     r7,r4,r3        /* compute offset to src from dest */
+	cmplwi  cr0,r8,0        /* Were we aligned on a 16 byte bdy? */
+	beq+    L(dst_aligned)
+
+
+
+	/* Destination is not aligned on quadword boundary.  Get us to one.
+
+	   r3 - return value (always)
+	   r4 - current source addr
+	   r5 - copy length
+	   r6 - current dest addr
+	   r7 - offset to src from dest
+	   r8 - number of bytes to quadword boundary
+	*/
+
+	mtcrf   0x01,r8         /* put #bytes to boundary into cr7  */
+	subf    r5,r8,r5        /* adjust remaining len */
+
+	bf      cr7*4+3,1f
+	lbzx    r0,r7,r6        /* copy 1 byte addr */
+	stb     r0,0(r6)
+	addi    r6,r6,1
+1:
+	bf      cr7*4+2,2f
+	lhzx    r0,r7,r6        /* copy 2 byte addr */
+	sth     r0,0(r6)
+	addi    r6,r6,2
+2:
+	bf      cr7*4+1,4f
+	lwzx    r0,r7,r6        /* copy 4 byte addr */
+	stw     r0,0(r6)
+	addi    r6,r6,4
+4:
+	bf      cr7*4+0,8f
+	lfdx    r0,r7,r6        /* copy 8 byte addr */
+	stfd    r0,0(r6)
+	addi    r6,r6,8
+8:
+	add     r4,r7,r6        /* update src addr */
+
+
+
+	/* Dest is quadword aligned now.
+
+	   Lots of decisions to make.  If we are copying less than a cache
+	   line we won't be here long.  If we are not on a cache line
+	   boundary we need to get there.  And then we need to figure out
+	   how many cache lines ahead to pre-touch.
+
+	   r3 - return value (always)
+	   r4 - current source addr
+	   r5 - copy length
+	   r6 - current dest addr
+	*/
+
+
+	.align  4
+L(dst_aligned):
+
+
+#ifdef SHARED
+	mflr    r0
+/* Establishes GOT addressability so we can load __cache_line_size
+   from static. This value was set from the aux vector during startup.  */
+	bcl     20,31,1f
+1:
+	mflr    r9
+	addis   r9,r9,__cache_line_size-1b@ha
+	lwz     r9,__cache_line_size-1b@l(r9)
+	mtlr    r0
+#else
+/* Load __cache_line_size from static. This value was set from the
+   aux vector during startup.  */
+	lis     r9,__cache_line_size@ha
+	lwz     r9,__cache_line_size@l(r9)
+#endif
+
+	cmplwi  cr5, r9, 0
+	bne+    cr5,L(cachelineset)
+	li      r9,64
+
+
+
+L(cachelineset):
+
+	addi   r10,r9,-1
+
+	cmpw   cr5,r5,r10       /* Less than a cacheline to go? */
+
+	neg     r7,r6           /* How far to next cacheline bdy? */
+
+	addi    r6,r6,-8        /* prepare for stdu  */
+	cmpwi   cr0,r9,128
+	addi    r4,r4,-8        /* prepare for ldu  */
+
+
+	ble+    cr5,L(lessthancacheline)
+
+	beq-    cr0,L(big_lines) /* 128 byte line code */
+
+
+
+
+	/* More than a cacheline left to go, and using 64 byte cachelines */
+
+	clrlwi  r7,r7,32-6      /* How far to next cacheline bdy? */
+
+	cmplwi  cr6,r7,0        /* Are we on a cacheline bdy already? */
+
+	/* Reduce total len by what it takes to get to the next cache line */
+	subf    r5,r7,r5
+	srwi    r7,r7,4         /* How many qws to get to the line bdy? */
+
+	/* How many full cache lines to copy after getting to a line bdy? */
+	srwi    r10,r5,6
+
+	cmplwi  r10,0           /* If no full cache lines to copy ... */
+	li      r11,0           /* number cachelines to copy with prefetch  */
+	beq     L(nocacheprefetch)
+
+
+	/* We are here because we have at least one full cache line to copy,
+	   and therefore some pre-touching to do. */
+
+	cmplwi  r10,PREFETCH_AHEAD
+	li      r12,64+8        /* prefetch distance  */
+	ble     L(lessthanmaxprefetch)
+
+	/* We can only do so much pre-fetching.  R11 will have the count of
+	   lines left to prefetch after the initial batch of prefetches
+	   are executed. */
+
+	subi    r11,r10,PREFETCH_AHEAD
+	li      r10,PREFETCH_AHEAD
+
+L(lessthanmaxprefetch):
+	mtctr   r10
+
+	/* At this point r10/ctr hold the number of lines to prefetch in this
+	   initial batch, and r11 holds any remainder. */
+
+L(prefetchSRC):
+	dcbt    r12,r4
+	addi    r12,r12,64
+	bdnz    L(prefetchSRC)
+
+
+	/* Prefetching is done, or was not needed.
+
+	   cr6 - are we on a cacheline boundary already?
+	   r7  - number of quadwords to the next cacheline boundary
+	*/
+
+L(nocacheprefetch):
+	mtctr   r7
+
+	cmplwi  cr1,r5,64   /* Less than a cache line to copy? */
+
+	/* How many bytes are left after we copy whatever full
+	   cache lines we can get? */
+	clrlwi  r5,r5,32-6
+
+	beq     cr6,L(cachelinealigned)
+
+
+	/* Copy quadwords up to the next cacheline boundary */
+
+L(aligntocacheline):
+	lfd     fp9,0x08(r4)
+	lfdu    fp10,0x10(r4)
+	stfd    fp9,0x08(r6)
+	stfdu   fp10,0x10(r6)
+	bdnz    L(aligntocacheline)
+
+
+	.align 4
+L(cachelinealigned):            /* copy while cache lines  */
+
+	blt-    cr1,L(lessthancacheline) /* size <64  */
+
+L(outerloop):
+	cmpwi   r11,0
+	mtctr   r11
+	beq-    L(endloop)
+
+	li      r11,64*ZERO_AHEAD +8    /* DCBZ dist  */
+
+	.align  4
+	/* Copy whole cachelines, optimized by prefetching SRC cacheline  */
+L(loop):                        /* Copy aligned body  */
+	dcbt    r12,r4          /* PREFETCH SOURCE some cache lines ahead  */
+	lfd     fp9,  0x08(r4)
+	dcbz    r11,r6
+	lfd     fp10, 0x10(r4)
+	lfd     fp11, 0x18(r4)
+	lfd     fp12, 0x20(r4)
+	stfd    fp9,  0x08(r6)
+	stfd    fp10, 0x10(r6)
+	stfd    fp11, 0x18(r6)
+	stfd    fp12, 0x20(r6)
+	lfd     fp9,  0x28(r4)
+	lfd     fp10, 0x30(r4)
+	lfd     fp11, 0x38(r4)
+	lfdu    fp12, 0x40(r4)
+	stfd    fp9,  0x28(r6)
+	stfd    fp10, 0x30(r6)
+	stfd    fp11, 0x38(r6)
+	stfdu   fp12, 0x40(r6)
+
+	bdnz    L(loop)
+
+
+L(endloop):
+	cmpwi   r10,0
+	beq-    L(endloop2)
+	mtctr   r10
+
+L(loop2):                       /* Copy aligned body  */
+	lfd     fp9,  0x08(r4)
+	lfd     fp10, 0x10(r4)
+	lfd     fp11, 0x18(r4)
+	lfd     fp12, 0x20(r4)
+	stfd    fp9,  0x08(r6)
+	stfd    fp10, 0x10(r6)
+	stfd    fp11, 0x18(r6)
+	stfd    fp12, 0x20(r6)
+	lfd     fp9,  0x28(r4)
+	lfd     fp10, 0x30(r4)
+	lfd     fp11, 0x38(r4)
+	lfdu    fp12, 0x40(r4)
+	stfd    fp9,  0x28(r6)
+	stfd    fp10, 0x30(r6)
+	stfd    fp11, 0x38(r6)
+	stfdu   fp12, 0x40(r6)
+
+	bdnz    L(loop2)
+L(endloop2):
+
+
+	.align  4
+L(lessthancacheline):           /* Was there less than cache to do ?  */
+	cmplwi  cr0,r5,16
+	srwi    r7,r5,4         /* divide size by 16  */
+	blt-    L(do_lt16)
+	mtctr   r7
+
+L(copy_remaining):
+	lfd     fp9,  0x08(r4)
+	lfdu    fp10, 0x10(r4)
+	stfd    fp9,  0x08(r6)
+	stfdu   fp10, 0x10(r6)
+	bdnz    L(copy_remaining)
+
+L(do_lt16):                     /* less than 16 ?  */
+	cmplwi  cr0,r5,0        /* copy remaining bytes (0-15)  */
+	beqlr+                  /* no rest to copy  */
+	addi    r4,r4,8
+	addi    r6,r6,8
+
+L(shortcopy):                   /* SIMPLE COPY to handle size =< 15 bytes  */
+	mtcrf   0x01,r5
+	sub     r7,r4,r6
+	bf-     cr7*4+0,8f
+	lfdx    fp9,r7,r6       /* copy 8 byte  */
+	stfd    fp9,0(r6)
+	addi    r6,r6,8
+8:
+	bf      cr7*4+1,4f
+	lwzx    r0,r7,r6        /* copy 4 byte  */
+	stw     r0,0(r6)
+	addi    r6,r6,4
+4:
+	bf      cr7*4+2,2f
+	lhzx    r0,r7,r6        /* copy 2 byte  */
+	sth     r0,0(r6)
+	addi    r6,r6,2
+2:
+	bf      cr7*4+3,1f
+	lbzx    r0,r7,r6        /* copy 1 byte  */
+	stb     r0,0(r6)
+1:
+	blr
+
+
+
+
+
+	/* Similar to above, but for use with 128 byte lines. */
+
+
+L(big_lines):
+
+	clrlwi  r7,r7,32-7      /* How far to next cacheline bdy? */
+
+	cmplwi  cr6,r7,0        /* Are we on a cacheline bdy already? */
+
+	/* Reduce total len by what it takes to get to the next cache line */
+	subf    r5,r7,r5
+	srwi    r7,r7,4         /* How many qw to get to the line bdy? */
+
+	/* How many full cache lines to copy after getting to a line bdy? */
+	srwi    r10,r5,7
+
+	cmplwi  r10,0           /* If no full cache lines to copy ... */
+	li      r11,0           /* number cachelines to copy with prefetch  */
+	beq     L(nocacheprefetch_128)
+
+
+	/* We are here because we have at least one full cache line to copy,
+	   and therefore some pre-touching to do. */
+
+	cmplwi  r10,PREFETCH_AHEAD
+	li      r12,128+8       /* prefetch distance  */
+	ble     L(lessthanmaxprefetch_128)
+
+	/* We can only do so much pre-fetching.  R11 will have the count of
+	   lines left to prefetch after the initial batch of prefetches
+	   are executed. */
+
+	subi    r11,r10,PREFETCH_AHEAD
+	li      r10,PREFETCH_AHEAD
+
+L(lessthanmaxprefetch_128):
+	mtctr   r10
+
+	/* At this point r10/ctr hold the number of lines to prefetch in this
+	   initial batch, and r11 holds any remainder. */
+
+L(prefetchSRC_128):
+	dcbt    r12,r4
+	addi    r12,r12,128
+	bdnz    L(prefetchSRC_128)
+
+
+	/* Prefetching is done, or was not needed.
+
+	   cr6 - are we on a cacheline boundary already?
+	   r7  - number of quadwords to the next cacheline boundary
+	*/
+
+L(nocacheprefetch_128):
+	mtctr   r7
+
+	cmplwi  cr1,r5,128  /* Less than a cache line to copy? */
+
+	/* How many bytes are left after we copy whatever full
+	   cache lines we can get? */
+	clrlwi  r5,r5,32-7
+
+	beq     cr6,L(cachelinealigned_128)
+
+
+	/* Copy quadwords up to the next cacheline boundary */
+
+L(aligntocacheline_128):
+	lfd     fp9,0x08(r4)
+	lfdu    fp10,0x10(r4)
+	stfd    fp9,0x08(r6)
+	stfdu   fp10,0x10(r6)
+	bdnz    L(aligntocacheline_128)
+
+
+L(cachelinealigned_128):        /* copy while cache lines  */
+
+	blt-    cr1,L(lessthancacheline) /* size <128  */
+
+L(outerloop_128):
+	cmpwi   r11,0
+	mtctr   r11
+	beq-    L(endloop_128)
+
+	li      r11,128*ZERO_AHEAD +8    /* DCBZ dist  */
+
+	.align  4
+	/* Copy whole cachelines, optimized by prefetching SRC cacheline  */
+L(loop_128):                    /* Copy aligned body  */
+	dcbt    r12,r4          /* PREFETCH SOURCE some cache lines ahead  */
+	lfd     fp9,  0x08(r4)
+	dcbz    r11,r6
+	lfd     fp10, 0x10(r4)
+	lfd     fp11, 0x18(r4)
+	lfd     fp12, 0x20(r4)
+	stfd    fp9,  0x08(r6)
+	stfd    fp10, 0x10(r6)
+	stfd    fp11, 0x18(r6)
+	stfd    fp12, 0x20(r6)
+	lfd     fp9,  0x28(r4)
+	lfd     fp10, 0x30(r4)
+	lfd     fp11, 0x38(r4)
+	lfd     fp12, 0x40(r4)
+	stfd    fp9,  0x28(r6)
+	stfd    fp10, 0x30(r6)
+	stfd    fp11, 0x38(r6)
+	stfd    fp12, 0x40(r6)
+	lfd     fp9,  0x48(r4)
+	lfd     fp10, 0x50(r4)
+	lfd     fp11, 0x58(r4)
+	lfd     fp12, 0x60(r4)
+	stfd    fp9,  0x48(r6)
+	stfd    fp10, 0x50(r6)
+	stfd    fp11, 0x58(r6)
+	stfd    fp12, 0x60(r6)
+	lfd     fp9,  0x68(r4)
+	lfd     fp10, 0x70(r4)
+	lfd     fp11, 0x78(r4)
+	lfdu    fp12, 0x80(r4)
+	stfd    fp9,  0x68(r6)
+	stfd    fp10, 0x70(r6)
+	stfd    fp11, 0x78(r6)
+	stfdu   fp12, 0x80(r6)
+
+	bdnz    L(loop_128)
+
+
+L(endloop_128):
+	cmpwi   r10,0
+	beq-    L(endloop2_128)
+	mtctr   r10
+
+L(loop2_128):                   /* Copy aligned body  */
+	lfd     fp9,  0x08(r4)
+	lfd     fp10, 0x10(r4)
+	lfd     fp11, 0x18(r4)
+	lfd     fp12, 0x20(r4)
+	stfd    fp9,  0x08(r6)
+	stfd    fp10, 0x10(r6)
+	stfd    fp11, 0x18(r6)
+	stfd    fp12, 0x20(r6)
+	lfd     fp9,  0x28(r4)
+	lfd     fp10, 0x30(r4)
+	lfd     fp11, 0x38(r4)
+	lfd     fp12, 0x40(r4)
+	stfd    fp9,  0x28(r6)
+	stfd    fp10, 0x30(r6)
+	stfd    fp11, 0x38(r6)
+	stfd    fp12, 0x40(r6)
+	lfd     fp9,  0x48(r4)
+	lfd     fp10, 0x50(r4)
+	lfd     fp11, 0x58(r4)
+	lfd     fp12, 0x60(r4)
+	stfd    fp9,  0x48(r6)
+	stfd    fp10, 0x50(r6)
+	stfd    fp11, 0x58(r6)
+	stfd    fp12, 0x60(r6)
+	lfd     fp9,  0x68(r4)
+	lfd     fp10, 0x70(r4)
+	lfd     fp11, 0x78(r4)
+	lfdu    fp12, 0x80(r4)
+	stfd    fp9,  0x68(r6)
+	stfd    fp10, 0x70(r6)
+	stfd    fp11, 0x78(r6)
+	stfdu   fp12, 0x80(r6)
+	bdnz    L(loop2_128)
+L(endloop2_128):
+
+	b       L(lessthancacheline)
+
+
+END (BP_SYM (memcpy))
+libc_hidden_builtin_def (memcpy)
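
The shape of the routine above: very small requests take the
byte/halfword/word short-copy path; otherwise the destination is
aligned to a quadword, the cache line size is read at run time from
__cache_line_size (falling back to 64), up to PREFETCH_AHEAD source
lines are pre-touched with dcbt, destination lines are zeroed ahead
with dcbz, and whole lines are copied in an unrolled load/store body,
with a separate body for 128-byte lines.  A hedged C analog of that
structure (memcpy_sketch and the fixed LINE are illustrative
assumptions, not the real code):

#include <stddef.h>
#include <string.h>

enum { LINE = 64, PREFETCH_AHEAD = 4 };	/* LINE stands in for the
					   runtime cache line size */

void *
memcpy_sketch (void *dst, const void *src, size_t n)
{
  unsigned char *d = dst;
  const unsigned char *s = src;

  /* Align the destination to the line boundary.  */
  while (n > 0 && ((size_t) d & (LINE - 1)) != 0)
    { *d++ = *s++; n--; }

  /* Copy whole lines, touching source lines a few iterations ahead.  */
  while (n >= LINE)
    {
      __builtin_prefetch (s + PREFETCH_AHEAD * LINE, 0 /* read */);
      memcpy (d, s, LINE);	/* stands in for the unrolled ld/std body */
      d += LINE; s += LINE; n -= LINE;
    }

  /* Tail.  */
  while (n-- > 0)
    *d++ = *s++;
  return dst;
}

int
main (void)
{
  char src[200] = "example", dst[200];
  memcpy_sketch (dst, src, sizeof src);
  return 0;
}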

Added: fsf/trunk/libc/sysdeps/powerpc/powerpc64/a2/memcpy.S
==============================================================================
--- fsf/trunk/libc/sysdeps/powerpc/powerpc64/a2/memcpy.S (added)
+++ fsf/trunk/libc/sysdeps/powerpc/powerpc64/a2/memcpy.S Mon Oct 25 00:03:15 2010
@@ -1,0 +1,501 @@
+/* Optimized memcpy implementation for PowerPC A2.
+   Copyright (C) 2010 Free Software Foundation, Inc.
+   Contributed by Michael Brutman <brutman@xxxxxxxxxx>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+#define PREFETCH_AHEAD 4        /* no cache lines SRC prefetching ahead  */
+#define ZERO_AHEAD 2            /* no cache lines DST zeroing ahead  */
+
+	.section        ".toc","aw"
+.LC0:
+	.tc __cache_line_size[TC],__cache_line_size
+	.section        ".text"
+	.align 2
+
+
+	.machine  a2
+EALIGN (BP_SYM (memcpy), 5, 0)
+	CALL_MCOUNT 3
+
+	dcbt    0,r4            /* Prefetch ONE SRC cacheline  */
+	cmpldi  cr1,r5,16       /* is size < 16 ?  */
+	mr      r6,r3           /* Copy dest reg to r6; */
+	blt+    cr1,L(shortcopy)
+
+
+	/* Big copy (16 bytes or more)
+
+	   Figure out how far to the nearest quadword boundary, or if we are
+	   on one already.  Also get the cache line size.
+
+	   r3 - return value (always)
+	   r4 - current source addr
+	   r5 - copy length
+	   r6 - current dest addr
+	*/
+
+	neg     r8,r3           /* LS 4 bits = # bytes to 8-byte dest bdry  */
+	ld      r9,.LC0@toc(r2) /* Get cache line size (part 1) */
+	clrldi  r8,r8,64-4      /* align to 16byte boundary  */
+	sub     r7,r4,r3        /* compute offset to src from dest */
+	lwz     r9,0(r9)        /* Get cache line size (part 2) */
+	cmpldi  cr0,r8,0        /* Were we aligned on a 16 byte bdy? */
+	addi    r10,r9,-1       /* Cache line mask */
+	beq+    L(dst_aligned)
+
+
+
+	/* Destination is not aligned on quadword boundary.  Get us to one.
+
+	   r3 - return value (always)
+	   r4 - current source addr
+	   r5 - copy length
+	   r6 - current dest addr
+	   r7 - offset to src from dest
+	   r8 - number of bytes to quadword boundary
+	*/
+
+	mtcrf   0x01,r8         /* put #bytes to boundary into cr7  */
+	subf    r5,r8,r5        /* adjust remaining len */
+
+	bf      cr7*4+3,1f
+	lbzx    r0,r7,r6        /* copy 1 byte addr */
+	stb     r0,0(r6)
+	addi    r6,r6,1
+1:
+	bf      cr7*4+2,2f
+	lhzx    r0,r7,r6        /* copy 2 byte addr */
+	sth     r0,0(r6)
+	addi    r6,r6,2
+2:
+	bf      cr7*4+1,4f
+	lwzx    r0,r7,r6        /* copy 4 byte addr */
+	stw     r0,0(r6)
+	addi    r6,r6,4
+4:
+	bf      cr7*4+0,8f
+	ldx     r0,r7,r6        /* copy 8 byte addr */
+	std     r0,0(r6)
+	addi    r6,r6,8
+8:
+	add     r4,r7,r6        /* update src addr */
+
+
+
+	/* Dest is quadword aligned now.
+
+	   Lots of decisions to make.  If we are copying less than a cache
+	   line we won't be here long.  If we are not on a cache line
+	   boundary we need to get there.  And then we need to figure out
+	   how many cache lines ahead to pre-touch.
+
+	   r3 - return value (always)
+	   r4 - current source addr
+	   r5 - copy length
+	   r6 - current dest addr
+	*/
+
+
+	.align 4
+L(dst_aligned):
+
+
+	cmpd   cr5,r5,r10       /* Less than a cacheline to go? */
+
+	neg     r7,r6           /* How far to next cacheline bdy? */
+
+	addi    r6,r6,-8        /* prepare for stdu  */
+	cmpdi   cr0,r9,128
+	addi    r4,r4,-8        /* prepare for ldu  */
+
+
+	ble+    cr5,L(lessthancacheline)
+
+	beq-    cr0,L(big_lines) /* 128 byte line code */
+
+
+
+	/* More than a cacheline left to go, and using 64 byte cachelines */
+
+	clrldi  r7,r7,64-6      /* How far to next cacheline bdy? */
+
+	cmpldi  cr6,r7,0        /* Are we on a cacheline bdy already? */
+
+	/* Reduce total len by what it takes to get to the next cache line */
+	subf    r5,r7,r5
+	srdi    r7,r7,4         /* How many qws to get to the line bdy? */
+
+	/* How many full cache lines to copy after getting to a line bdy? */
+	srdi    r10,r5,6
+
+	cmpldi  r10,0           /* If no full cache lines to copy ... */
+	li      r11,0           /* number cachelines to copy with prefetch  */
+	beq     L(nocacheprefetch)
+
+
+	/* We are here because we have at least one full cache line to copy,
+	   and therefore some pre-touching to do. */
+
+	cmpldi  r10,PREFETCH_AHEAD
+	li      r12,64+8        /* prefetch distance  */
+	ble     L(lessthanmaxprefetch)
+
+	/* We can only do so much pre-fetching.  R11 will have the count of
+	   lines left to prefetch after the initial batch of prefetches
+	   are executed. */
+
+	subi    r11,r10,PREFETCH_AHEAD
+	li      r10,PREFETCH_AHEAD
+
+L(lessthanmaxprefetch):
+	mtctr   r10
+
+	/* At this point r10/ctr hold the number of lines to prefetch in this
+	   initial batch, and r11 holds any remainder. */
+
+L(prefetchSRC):
+	dcbt    r12,r4
+	addi    r12,r12,64
+	bdnz    L(prefetchSRC)
+
+
+	/* Prefetching is done, or was not needed.
+
+	   cr6 - are we on a cacheline boundary already?
+	   r7  - number of quadwords to the next cacheline boundary
+	*/
+
+L(nocacheprefetch):
+	mtctr   r7
+
+	cmpldi  cr1,r5,64   /* Less than a cache line to copy? */
+
+	/* How many bytes are left after we copy whatever full
+	   cache lines we can get? */
+	clrldi  r5,r5,64-6
+
+	beq     cr6,L(cachelinealigned)
+
+
+	/* Copy quadwords up to the next cacheline boundary */
+
+L(aligntocacheline):
+	ld      r9,0x08(r4)
+	ld      r7,0x10(r4)
+	addi    r4,r4,0x10
+	std     r9,0x08(r6)
+	stdu    r7,0x10(r6)
+	bdnz    L(aligntocacheline)
+
+
+	.align 4
+L(cachelinealigned):            /* copy while cache lines  */
+
+	blt-    cr1,L(lessthancacheline) /* size <64  */
+
+L(outerloop):
+	cmpdi   r11,0
+	mtctr   r11
+	beq-    L(endloop)
+
+	li      r11,64*ZERO_AHEAD +8    /* DCBZ dist  */
+
+	.align  4
+	/* Copy whole cachelines, optimized by prefetching SRC cacheline  */
+L(loop):                        /* Copy aligned body  */
+	dcbt    r12,r4          /* PREFETCH SOURCE some cache lines ahead  */
+	ld      r9, 0x08(r4)
+	dcbz    r11,r6
+	ld      r7, 0x10(r4)
+	ld      r8, 0x18(r4)
+	ld      r0, 0x20(r4)
+	std     r9, 0x08(r6)
+	std     r7, 0x10(r6)
+	std     r8, 0x18(r6)
+	std     r0, 0x20(r6)
+	ld      r9, 0x28(r4)
+	ld      r7, 0x30(r4)
+	ld      r8, 0x38(r4)
+	ld      r0, 0x40(r4)
+	addi    r4, r4,0x40
+	std     r9, 0x28(r6)
+	std     r7, 0x30(r6)
+	std     r8, 0x38(r6)
+	stdu    r0, 0x40(r6)
+
+	bdnz    L(loop)
+
+
+L(endloop):
+	cmpdi   r10,0
+	beq-    L(endloop2)
+	mtctr   r10
+
+L(loop2):                       /* Copy aligned body  */
+	ld      r9, 0x08(r4)
+	ld      r7, 0x10(r4)
+	ld      r8, 0x18(r4)
+	ld      r0, 0x20(r4)
+	std     r9, 0x08(r6)
+	std     r7, 0x10(r6)
+	std     r8, 0x18(r6)
+	std     r0, 0x20(r6)
+	ld      r9, 0x28(r4)
+	ld      r7, 0x30(r4)
+	ld      r8, 0x38(r4)
+	ld      r0, 0x40(r4)
+	addi    r4, r4,0x40
+	std     r9, 0x28(r6)
+	std     r7, 0x30(r6)
+	std     r8, 0x38(r6)
+	stdu    r0, 0x40(r6)
+
+	bdnz    L(loop2)
+L(endloop2):
+
+
+	.align 4
+L(lessthancacheline):           /* Was there less than cache to do ?  */
+	cmpldi  cr0,r5,16
+	srdi    r7,r5,4         /* divide size by 16  */
+	blt-    L(do_lt16)
+	mtctr   r7
+
+L(copy_remaining):
+	ld      r8,0x08(r4)
+	ld      r7,0x10(r4)
+	addi    r4,r4,0x10
+	std     r8,0x08(r6)
+	stdu    r7,0x10(r6)
+	bdnz    L(copy_remaining)
+
+L(do_lt16):                     /* less than 16 ?  */
+	cmpldi  cr0,r5,0        /* copy remaining bytes (0-15)  */
+	beqlr+                  /* no rest to copy  */
+	addi    r4,r4,8
+	addi    r6,r6,8
+
+L(shortcopy):                   /* SIMPLE COPY to handle size =< 15 bytes  */
+	mtcrf   0x01,r5
+	sub     r7,r4,r6
+	bf-     cr7*4+0,8f
+	ldx     r0,r7,r6        /* copy 8 byte  */
+	std     r0,0(r6)
+	addi    r6,r6,8
+8:
+	bf      cr7*4+1,4f
+	lwzx    r0,r7,r6        /* copy 4 byte  */
+	stw     r0,0(r6)
+	addi    r6,r6,4
+4:
+	bf      cr7*4+2,2f
+	lhzx    r0,r7,r6        /* copy 2 byte  */
+	sth     r0,0(r6)
+	addi    r6,r6,2
+2:
+	bf      cr7*4+3,1f
+	lbzx    r0,r7,r6        /* copy 1 byte  */
+	stb     r0,0(r6)
+1:
+	blr
+
+
+
+
+
+	/* Similar to above, but for use with 128 byte lines. */
+
+
+L(big_lines):
+
+	clrldi  r7,r7,64-7      /* How far to next cacheline bdy? */
+
+	cmpldi  cr6,r7,0        /* Are we on a cacheline bdy already? */
+
+	/* Reduce total len by what it takes to get to the next cache line */
+	subf    r5,r7,r5
+	srdi    r7,r7,4         /* How many qws to get to the line bdy? */
+
+	/* How many full cache lines to copy after getting to a line bdy? */
+	srdi    r10,r5,7
+
+	cmpldi  r10,0           /* If no full cache lines to copy ... */
+	li      r11,0           /* number cachelines to copy with prefetch  */
+	beq     L(nocacheprefetch_128)
+
+
+	/* We are here because we have at least one full cache line to copy,
+	   and therefore some pre-touching to do. */
+
+	cmpldi  r10,PREFETCH_AHEAD
+	li      r12,128+8       /* prefetch distance  */
+	ble     L(lessthanmaxprefetch_128)
+
+	/* We can only do so much pre-fetching.  R11 will have the count of
+	   lines left to prefetch after the initial batch of prefetches
+	   are executed. */
+
+	subi    r11,r10,PREFETCH_AHEAD
+	li      r10,PREFETCH_AHEAD
+
+L(lessthanmaxprefetch_128):
+	mtctr   r10
+
+	/* At this point r10/ctr hold the number of lines to prefetch in this
+	   initial batch, and r11 holds any remainder. */
+
+L(prefetchSRC_128):
+	dcbt    r12,r4
+	addi    r12,r12,128
+	bdnz    L(prefetchSRC_128)
+
+
+	/* Prefetching is done, or was not needed.
+
+	   cr6 - are we on a cacheline boundary already?
+	   r7  - number of quadwords to the next cacheline boundary
+	*/
+
+L(nocacheprefetch_128):
+	mtctr   r7
+
+	cmpldi  cr1,r5,128  /* Less than a cache line to copy? */
+
+	/* How many bytes are left after we copy whatever full
+	   cache lines we can get? */
+	clrldi  r5,r5,64-7
+
+	beq     cr6,L(cachelinealigned_128)
+
+
+	/* Copy quadwords up to the next cacheline boundary */
+
+L(aligntocacheline_128):
+	ld      r9,0x08(r4)
+	ld      r7,0x10(r4)
+	addi    r4,r4,0x10
+	std     r9,0x08(r6)
+	stdu    r7,0x10(r6)
+	bdnz    L(aligntocacheline_128)
+
+
+L(cachelinealigned_128):        /* copy while cache lines  */
+
+	blt-    cr1,L(lessthancacheline) /* size <128  */
+
+L(outerloop_128):
+	cmpdi   r11,0
+	mtctr   r11
+	beq-    L(endloop_128)
+
+	li      r11,128*ZERO_AHEAD +8    /* DCBZ dist  */
+
+	.align  4
+	/* Copy whole cachelines, optimized by prefetching SRC cacheline  */
+L(loop_128):                    /* Copy aligned body  */
+	dcbt    r12,r4          /* PREFETCH SOURCE some cache lines ahead  */
+	ld      r9, 0x08(r4)
+	dcbz    r11,r6
+	ld      r7, 0x10(r4)
+	ld      r8, 0x18(r4)
+	ld      r0, 0x20(r4)
+	std     r9, 0x08(r6)
+	std     r7, 0x10(r6)
+	std     r8, 0x18(r6)
+	std     r0, 0x20(r6)
+	ld      r9, 0x28(r4)
+	ld      r7, 0x30(r4)
+	ld      r8, 0x38(r4)
+	ld      r0, 0x40(r4)
+	std     r9, 0x28(r6)
+	std     r7, 0x30(r6)
+	std     r8, 0x38(r6)
+	std     r0, 0x40(r6)
+	ld      r9, 0x48(r4)
+	ld      r7, 0x50(r4)
+	ld      r8, 0x58(r4)
+	ld      r0, 0x60(r4)
+	std     r9, 0x48(r6)
+	std     r7, 0x50(r6)
+	std     r8, 0x58(r6)
+	std     r0, 0x60(r6)
+	ld      r9, 0x68(r4)
+	ld      r7, 0x70(r4)
+	ld      r8, 0x78(r4)
+	ld      r0, 0x80(r4)
+	addi    r4, r4,0x80
+	std     r9, 0x68(r6)
+	std     r7, 0x70(r6)
+	std     r8, 0x78(r6)
+	stdu    r0, 0x80(r6)
+
+	bdnz    L(loop_128)
+
+
+L(endloop_128):
+	cmpdi   r10,0
+	beq-    L(endloop2_128)
+	mtctr   r10
+
+L(loop2_128):                       /* Copy aligned body  */
+	ld      r9, 0x08(r4)
+	ld      r7, 0x10(r4)
+	ld      r8, 0x18(r4)
+	ld      r0, 0x20(r4)
+	std     r9, 0x08(r6)
+	std     r7, 0x10(r6)
+	std     r8, 0x18(r6)
+	std     r0, 0x20(r6)
+	ld      r9, 0x28(r4)
+	ld      r7, 0x30(r4)
+	ld      r8, 0x38(r4)
+	ld      r0, 0x40(r4)
+	std     r9, 0x28(r6)
+	std     r7, 0x30(r6)
+	std     r8, 0x38(r6)
+	std     r0, 0x40(r6)
+	ld      r9, 0x48(r4)
+	ld      r7, 0x50(r4)
+	ld      r8, 0x58(r4)
+	ld      r0, 0x60(r4)
+	std     r9, 0x48(r6)
+	std     r7, 0x50(r6)
+	std     r8, 0x58(r6)
+	std     r0, 0x60(r6)
+	ld      r9, 0x68(r4)
+	ld      r7, 0x70(r4)
+	ld      r8, 0x78(r4)
+	ld      r0, 0x80(r4)
+	addi    r4, r4,0x80
+	std     r9, 0x68(r6)
+	std     r7, 0x70(r6)
+	std     r8, 0x78(r6)
+	stdu    r0, 0x80(r6)
+
+	bdnz    L(loop2_128)
+L(endloop2_128):
+
+	b       L(lessthancacheline)
+
+
+END_GEN_TB (BP_SYM (memcpy),TB_TOCLESS)
+libc_hidden_builtin_def (memcpy)
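
Note: the 64-bit variant follows the same plan as the 32-bit one but
keeps the copy in general-purpose registers with ld/std doublewords
instead of the lfd/stfd floating-point pairs, and it fetches
__cache_line_size through a TOC entry (.LC0) rather than the
GOT-relative sequence used for powerpc32.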

Added: fsf/trunk/libc/sysdeps/unix/sysv/linux/powerpc/powerpc32/a2/Implies
==============================================================================
--- fsf/trunk/libc/sysdeps/unix/sysv/linux/powerpc/powerpc32/a2/Implies (added)
+++ fsf/trunk/libc/sysdeps/unix/sysv/linux/powerpc/powerpc32/a2/Implies Mon Oct 25 00:03:15 2010
@@ -1,0 +1,2 @@
+powerpc/powerpc32/a2/fpu
+powerpc/powerpc32/a2

Added: fsf/trunk/libc/sysdeps/unix/sysv/linux/powerpc/powerpc64/a2/Implies
==============================================================================
--- fsf/trunk/libc/sysdeps/unix/sysv/linux/powerpc/powerpc64/a2/Implies (added)
+++ fsf/trunk/libc/sysdeps/unix/sysv/linux/powerpc/powerpc64/a2/Implies Mon Oct 25 00:03:15 2010
@@ -1,0 +1,2 @@
+powerpc/powerpc64/a2/fpu
+powerpc/powerpc64/a2
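
Note: each Implies file splices extra directories into the sysdeps
search order, so a build configured for the Linux a2 submachine also
picks up powerpc/powerpc32/a2 and powerpc/powerpc64/a2 (plus their fpu
subdirectories), which is where the new memcpy.S implementations live.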