
[commits] r8673 - in /fsf/trunk/libc: ./ elf/ sysdeps/x86_64/ sysdeps/x86_64/bits/ sysdeps/x86_64/elf/



Author: eglibc
Date: Thu Jul 16 00:09:33 2009
New Revision: 8673

Log:
Import glibc-mainline for 2009-07-16

Added:
    fsf/trunk/libc/elf/tst-audit4.c
    fsf/trunk/libc/elf/tst-audit5.c
    fsf/trunk/libc/elf/tst-auditmod4a.c
    fsf/trunk/libc/elf/tst-auditmod4b.c
    fsf/trunk/libc/elf/tst-auditmod5a.c
    fsf/trunk/libc/elf/tst-auditmod5b.c
    fsf/trunk/libc/sysdeps/x86_64/link-defines.sym
Removed:
    fsf/trunk/libc/elf/do-lookup.h
Modified:
    fsf/trunk/libc/ChangeLog
    fsf/trunk/libc/config.h.in
    fsf/trunk/libc/config.make.in
    fsf/trunk/libc/configure
    fsf/trunk/libc/configure.in
    fsf/trunk/libc/elf/Makefile
    fsf/trunk/libc/elf/dl-lookup.c
    fsf/trunk/libc/sysdeps/x86_64/Makefile
    fsf/trunk/libc/sysdeps/x86_64/bits/link.h
    fsf/trunk/libc/sysdeps/x86_64/dl-trampoline.S
    fsf/trunk/libc/sysdeps/x86_64/elf/configure
    fsf/trunk/libc/sysdeps/x86_64/elf/configure.in

Modified: fsf/trunk/libc/ChangeLog
==============================================================================
--- fsf/trunk/libc/ChangeLog (original)
+++ fsf/trunk/libc/ChangeLog Thu Jul 16 00:09:33 2009
@@ -1,4 +1,47 @@
+2009-07-15  Ulrich Drepper  <drepper@xxxxxxxxxx>
+
+	* sysdeps/x86_64/dl-trampoline.h: Remove after integrating code into...
+	* sysdeps/x86_64/dl-trampoline.S: ...here.  Rewrite to avoid function
+	pointers in writable memory.
+
+2009-07-07  H.J. Lu  <hongjiu.lu@xxxxxxxxx>
+
+	* config.h.in: Add HAVE_AVX_SUPPORT entry.
+	* config.make.in: Add config-cflags-avx entry.
+	* configure.in: Substitute libc_cv_cc_avx.
+	* elf/Makefile: Add rules to build and run tst-audit4 and tst-audit5.
+	* elf/tst-audit4.c: New file.
+	* elf/tst-audit5.c: New file.
+	* elf/tst-auditmod4a.c: New file.
+	* elf/tst-auditmod4b.c: New file.
+	* elf/tst-auditmod5a.c: New file.
+	* elf/tst-auditmod5b.c: New file.
+	* sysdeps/x86_64/Makefile (gen-as-const-headers): Add
+	link-defines.sym.
+	* sysdeps/x86_64/bits/link.h (La_x86_64_ymm): New.
+	(La_x86_64_vector): Likewise.
+	(La_x86_64_regs): Append lr_vector.
+	(La_x86_64_retval): Append lrv_vector0/lrv_vector1.
+	* sysdeps/x86_64/dl-trampoline.S (_dl_runtime_profile): Move
+	saving and restoring SSE registers to ...
+	* sysdeps/x86_64/dl-trampoline.h: This.  New file.
+	* sysdeps/x86_64/dl-trampoline.S: Include <config.h> and
+	<link-defines.h>.
+	(_dl_runtime_profile): Use LR_SIZE to allocate space for
+	La_x86_64_regs.  Allocate extra space and jump to memory at
+	save_and_restore_vector if HAVE_AVX_SUPPORT is defined.
+	(save_and_restore_vector_sse): New.
+	(save_and_restore_vector_avx): Likewise.
+	(check_avx): Likewise.
+	(save_and_restore_vector): Likewise.
+	* sysdeps/x86_64/elf/configure.in: Set libc_cv_cc_avx and
+	HAVE_AVX_SUPPORT.
+	* sysdeps/x86_64/link-defines.sym: New file.
+
 2009-07-10  Ulrich Drepper  <drepper@xxxxxxxxxx>
+
+	* elf/do-lookup.h: Removed after folding content into...
+	* elf/dl-lookup.c: ...here.
 
 	* sysdeps/unix/sysv/linux/sys/epoll.h: Fix comment.
 

Modified: fsf/trunk/libc/config.h.in
==============================================================================
--- fsf/trunk/libc/config.h.in (original)
+++ fsf/trunk/libc/config.h.in Thu Jul 16 00:09:33 2009
@@ -134,6 +134,9 @@
 
 /* Define if gcc supports SSE4.  */
 #undef	HAVE_SSE4_SUPPORT
+
+/* Define if gcc supports AVX.  */
+#undef	HAVE_AVX_SUPPORT
 
 /* Define if the compiler's exception support is based on libunwind.  */
 #undef	HAVE_CC_WITH_LIBUNWIND

Modified: fsf/trunk/libc/config.make.in
==============================================================================
--- fsf/trunk/libc/config.make.in (original)
+++ fsf/trunk/libc/config.make.in Thu Jul 16 00:09:33 2009
@@ -35,6 +35,7 @@
 asflags-cpu = @libc_cv_cc_submachine@
 
 config-cflags-sse4 = @libc_cv_cc_sse4@
+config-cflags-avx = @libc_cv_cc_avx@
 
 defines = @DEFINES@
 sysincludes = @SYSINCLUDES@

Modified: fsf/trunk/libc/configure
==============================================================================
--- fsf/trunk/libc/configure (original)
+++ fsf/trunk/libc/configure Thu Jul 16 00:09:33 2009
@@ -657,6 +657,7 @@
 elf
 ldd_rewrite_script
 use_ldconfig
+libc_cv_cc_avx
 libc_cv_cc_sse4
 libc_cv_cpp_asm_debuginfo
 libc_cv_forced_unwind
@@ -8772,6 +8773,7 @@
 
 
 
+
 if test $elf = yes; then
   cat >>confdefs.h <<\_ACEOF
 #define HAVE_ELF 1

Modified: fsf/trunk/libc/configure.in
==============================================================================
--- fsf/trunk/libc/configure.in (original)
+++ fsf/trunk/libc/configure.in Thu Jul 16 00:09:33 2009
@@ -2277,6 +2277,7 @@
 dnl sysdeps/CPU/configure.in checks set this via arch-specific asm tests
 AC_SUBST(libc_cv_cpp_asm_debuginfo)
 AC_SUBST(libc_cv_cc_sse4)
+AC_SUBST(libc_cv_cc_avx)
 
 AC_SUBST(use_ldconfig)
 AC_SUBST(ldd_rewrite_script)

Modified: fsf/trunk/libc/elf/Makefile
==============================================================================
--- fsf/trunk/libc/elf/Makefile (original)
+++ fsf/trunk/libc/elf/Makefile Thu Jul 16 00:09:33 2009
@@ -47,7 +47,7 @@
 		   dl-cache.h dl-hash.h soinit.c sofini.c ldd.bash.in \
 		   genrtldtbl.awk atomicity.h dl-procinfo.h ldsodefs.h \
 		   dl-librecon.h interp.c sln.c dl-dst.h hp-timing.h \
-		   do-lookup.h dl-lookupcfg.h sprof.c gen-trusted-dirs.awk \
+		   dl-lookupcfg.h sprof.c gen-trusted-dirs.awk \
 		   testobj1.c testobj2.c testobj3.c testobj4.c testobj5.c \
 		   testobj6.c testobj1_1.c failobj.c unloadmod.c \
 		   ldconfig.h ldconfig.c cache.c readlib.c readelflib.c \
@@ -89,8 +89,10 @@
 		   unload4mod1.c unload4mod2.c unload4mod3.c unload4mod4.c \
 		   unload6mod1.c unload6mod2.c unload6mod3.c \
 		   unload7mod1.c unload7mod2.c \
-		   tst-audit1.c tst-audit2.c tst-audit3.c \
+		   tst-audit1.c tst-audit2.c tst-audit3.c tst-audit4.c \
 		   tst-auditmod1.c tst-auditmod3a.c tst-auditmod3b.c \
+		   tst-auditmod4a.c tst-auditmod4b.c \
+		   tst-audit5.c tst-auditmod5a.c tst-auditmod5b.c \
 		   order2mod1.c order2mod2.c order2mod3.c order2mod4.c \
 		   tst-stackguard1.c tst-stackguard1-static.c \
 		   tst-array5.c tst-array5-static.c tst-array5dep.c \
@@ -198,7 +200,7 @@
 test-srcs = tst-pathopt
 tests-execstack-yes = tst-execstack tst-execstack-needed tst-execstack-prog
 ifeq (x86_64,$(config-machine))
-tests += tst-audit3
+tests += tst-audit3 tst-audit4 tst-audit5
 endif
 endif
 ifeq (yesyes,$(have-fpie)$(build-shared))
@@ -237,7 +239,6 @@
 		$(modules-execstack-$(have-z-execstack)) \
 		tst-dlopenrpathmod tst-deep1mod1 tst-deep1mod2 tst-deep1mod3 \
 		tst-dlmopen1mod tst-auditmod1 \
-		tst-auditmod3a tst-auditmod3b \
 		unload3mod1 unload3mod2 unload3mod3 unload3mod4 \
 		unload4mod1 unload4mod2 unload4mod3 unload4mod4 \
 		unload6mod1 unload6mod2 unload6mod3 \
@@ -250,6 +251,11 @@
 endif
 ifeq (yesyes,$(have-fpie)$(build-shared))
 modules-names += tst-piemod1
+endif
+ifeq (x86_64,$(config-machine))
+modules-names += tst-auditmod3a tst-auditmod3b \
+		tst-auditmod4a tst-auditmod4b \
+		tst-auditmod5a tst-auditmod5b
 endif
 modules-execstack-yes = tst-execstack-mod
 extra-test-objs += $(addsuffix .os,$(strip $(modules-names)))
@@ -973,6 +979,14 @@
 $(objpfx)tst-audit3.out: $(objpfx)tst-auditmod3b.so
 tst-audit3-ENV = LD_AUDIT=$(objpfx)tst-auditmod3b.so
 
+$(objpfx)tst-audit4: $(objpfx)tst-auditmod4a.so
+$(objpfx)tst-audit4.out: $(objpfx)tst-auditmod4b.so
+tst-audit4-ENV = LD_AUDIT=$(objpfx)tst-auditmod4b.so
+
+$(objpfx)tst-audit5: $(objpfx)tst-auditmod5a.so
+$(objpfx)tst-audit5.out: $(objpfx)tst-auditmod5b.so
+tst-audit5-ENV = LD_AUDIT=$(objpfx)tst-auditmod5b.so
+
 $(objpfx)tst-global1: $(libdl)
 $(objpfx)tst-global1.out: $(objpfx)testobj6.so $(objpfx)testobj2.so
 
@@ -1115,3 +1129,9 @@
 
 $(objpfx)tst-unique2: $(libdl) $(objpfx)tst-unique2mod1.so
 $(objpfx)tst-unique2.out: $(objpfx)tst-unique2mod2.so
+
+ifeq (yes,$(config-cflags-avx))
+CFLAGS-tst-audit4.c += -mavx
+CFLAGS-tst-auditmod4a.c += -mavx
+CFLAGS-tst-auditmod4b.c += -mavx
+endif

Modified: fsf/trunk/libc/elf/dl-lookup.c
==============================================================================
--- fsf/trunk/libc/elf/dl-lookup.c (original)
+++ fsf/trunk/libc/elf/dl-lookup.c Thu Jul 16 00:09:33 2009
@@ -69,8 +69,371 @@
 #endif
 
 
-/* The actual lookup code.  */
-#include "do-lookup.h"
+/* Inner part of the lookup functions.  We return a value > 0 if we
+   found the symbol, the value 0 if nothing is found and < 0 if
+   something bad happened.  */
+static int
+__attribute_noinline__
+do_lookup_x (const char *undef_name, uint_fast32_t new_hash,
+	     unsigned long int *old_hash, const ElfW(Sym) *ref,
+	     struct sym_val *result, struct r_scope_elem *scope, size_t i,
+	     const struct r_found_version *const version, int flags,
+	     struct link_map *skip, int type_class, struct link_map *undef_map)
+{
+  size_t n = scope->r_nlist;
+  /* Make sure we read the value before proceeding.  Otherwise we
+     might use r_list pointing to the initial scope and r_nlist being
+     the value after a resize.  That is the only path in dl-open.c not
+     protected by GSCOPE.  A read barrier here might be too expensive.  */
+  __asm volatile ("" : "+r" (n), "+m" (scope->r_list));
+  struct link_map **list = scope->r_list;
+
+  do
+    {
+      /* These variables are used in the nested function.  */
+      Elf_Symndx symidx;
+      int num_versions = 0;
+      const ElfW(Sym) *versioned_sym = NULL;
+
+      const struct link_map *map = list[i]->l_real;
+
+      /* Here comes the extra test needed for `_dl_lookup_symbol_skip'.  */
+      if (map == skip)
+	continue;
+
+      /* Don't search the executable when resolving a copy reloc.  */
+      if ((type_class & ELF_RTYPE_CLASS_COPY) && map->l_type == lt_executable)
+	continue;
+
+      /* Do not look into objects which are going to be removed.  */
+      if (map->l_removed)
+	continue;
+
+      /* Print some debugging info if wanted.  */
+      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SYMBOLS, 0))
+	_dl_debug_printf ("symbol=%s;  lookup in file=%s [%lu]\n",
+			  undef_name,
+			  map->l_name[0] ? map->l_name : rtld_progname,
+			  map->l_ns);
+
+      /* If the hash table is empty there is nothing to do here.  */
+      if (map->l_nbuckets == 0)
+	continue;
+
+      /* The tables for this map.  */
+      const ElfW(Sym) *symtab = (const void *) D_PTR (map, l_info[DT_SYMTAB]);
+      const char *strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
+
+
+      /* Nested routine to check whether the symbol matches.  */
+      const ElfW(Sym) *
+      __attribute_noinline__
+      check_match (const ElfW(Sym) *sym)
+      {
+	unsigned int stt = ELFW(ST_TYPE) (sym->st_info);
+	assert (ELF_RTYPE_CLASS_PLT == 1);
+	if (__builtin_expect ((sym->st_value == 0 /* No value.  */
+			       && stt != STT_TLS)
+			      || (type_class & (sym->st_shndx == SHN_UNDEF)),
+			      0))
+	  return NULL;
+
+	/* Ignore all but STT_NOTYPE, STT_OBJECT, STT_FUNC,
+	   STT_COMMON, STT_TLS, and STT_GNU_IFUNC since these are not
+	   code/data definitions.  */
+#define ALLOWED_STT \
+	((1 << STT_NOTYPE) | (1 << STT_OBJECT) | (1 << STT_FUNC) \
+	 | (1 << STT_COMMON) | (1 << STT_TLS) | (1 << STT_GNU_IFUNC))
+	if (__builtin_expect (((1 << stt) & ALLOWED_STT) == 0, 0))
+	  return NULL;
+
+	if (sym != ref && strcmp (strtab + sym->st_name, undef_name))
+	  /* Not the symbol we are looking for.  */
+	  return NULL;
+
+	const ElfW(Half) *verstab = map->l_versyms;
+	if (version != NULL)
+	  {
+	    if (__builtin_expect (verstab == NULL, 0))
+	      {
+		/* We need a versioned symbol but haven't found any.  If
+		   this is the object which is referenced in the verneed
+		   entry it is a bug in the library since a symbol must
+		   not simply disappear.
+
+		   It would also be a bug in the object since it means that
+		   the list of required versions is incomplete and so the
+		   tests in dl-version.c haven't found a problem.  */
+		assert (version->filename == NULL
+			|| ! _dl_name_match_p (version->filename, map));
+
+		/* Otherwise we accept the symbol.  */
+	      }
+	    else
+	      {
+		/* We can match the version information or use the
+		   default one if it is not hidden.  */
+		ElfW(Half) ndx = verstab[symidx] & 0x7fff;
+		if ((map->l_versions[ndx].hash != version->hash
+		     || strcmp (map->l_versions[ndx].name, version->name))
+		    && (version->hidden || map->l_versions[ndx].hash
+			|| (verstab[symidx] & 0x8000)))
+		  /* It's not the version we want.  */
+		  return NULL;
+	      }
+	  }
+	else
+	  {
+	    /* No specific version is selected.  There are two ways we
+	       can get here:
+
+	       - a binary which does not include versioning information
+	       is loaded
+
+	       - dlsym() instead of dlvsym() is used to get a symbol which
+	       might exist in more than one form
+
+	       If the library does not provide symbol version information
+	       there is no problem at all: we simply use the symbol if it
+	       is defined.
+
+	       These two lookups need to be handled differently if the
+	       library defines versions.  In the case of the old
+	       unversioned application the oldest (default) version
+	       should be used.  In case of a dlsym() call the latest and
+	       public interface should be returned.  */
+	    if (verstab != NULL)
+	      {
+		if ((verstab[symidx] & 0x7fff)
+		    >= ((flags & DL_LOOKUP_RETURN_NEWEST) ? 2 : 3))
+		  {
+		    /* Don't accept hidden symbols.  */
+		    if ((verstab[symidx] & 0x8000) == 0
+			&& num_versions++ == 0)
+		      /* No version so far.  */
+		      versioned_sym = sym;
+
+		    return NULL;
+		  }
+	      }
+	  }
+
+	/* There cannot be another entry for this symbol so stop here.  */
+	return sym;
+      }
+
+      const ElfW(Sym) *sym;
+      const ElfW(Addr) *bitmask = map->l_gnu_bitmask;
+      if (__builtin_expect (bitmask != NULL, 1))
+	{
+	  ElfW(Addr) bitmask_word
+	    = bitmask[(new_hash / __ELF_NATIVE_CLASS)
+		      & map->l_gnu_bitmask_idxbits];
+
+	  unsigned int hashbit1 = new_hash & (__ELF_NATIVE_CLASS - 1);
+	  unsigned int hashbit2 = ((new_hash >> map->l_gnu_shift)
+				   & (__ELF_NATIVE_CLASS - 1));
+
+	  if (__builtin_expect ((bitmask_word >> hashbit1)
+				& (bitmask_word >> hashbit2) & 1, 0))
+	    {
+	      Elf32_Word bucket = map->l_gnu_buckets[new_hash
+						     % map->l_nbuckets];
+	      if (bucket != 0)
+		{
+		  const Elf32_Word *hasharr = &map->l_gnu_chain_zero[bucket];
+
+		  do
+		    if (((*hasharr ^ new_hash) >> 1) == 0)
+		      {
+			symidx = hasharr - map->l_gnu_chain_zero;
+			sym = check_match (&symtab[symidx]);
+			if (sym != NULL)
+			  goto found_it;
+		      }
+		  while ((*hasharr++ & 1u) == 0);
+		}
+	    }
+	  /* No symbol found.  */
+	  symidx = SHN_UNDEF;
+	}
+      else
+	{
+	  if (*old_hash == 0xffffffff)
+	    *old_hash = _dl_elf_hash (undef_name);
+
+	  /* Use the old SysV-style hash table.  Search the appropriate
+	     hash bucket in this object's symbol table for a definition
+	     for the same symbol name.  */
+	  for (symidx = map->l_buckets[*old_hash % map->l_nbuckets];
+	       symidx != STN_UNDEF;
+	       symidx = map->l_chain[symidx])
+	    {
+	      sym = check_match (&symtab[symidx]);
+	      if (sym != NULL)
+		goto found_it;
+	    }
+	}
+
+      /* If we have seen exactly one versioned symbol while we are
+	 looking for an unversioned symbol and the version is not the
+	 default version we still accept this symbol since there are
+	 no possible ambiguities.  */
+      sym = num_versions == 1 ? versioned_sym : NULL;
+
+      if (sym != NULL)
+	{
+	found_it:
+	  switch (__builtin_expect (ELFW(ST_BIND) (sym->st_info), STB_GLOBAL))
+	    {
+	    case STB_WEAK:
+	      /* Weak definition.  Use this value if we don't find another.  */
+	      if (__builtin_expect (GLRO(dl_dynamic_weak), 0))
+		{
+		  if (! result->s)
+		    {
+		      result->s = sym;
+		      result->m = (struct link_map *) map;
+		    }
+		  break;
+		}
+	      /* FALLTHROUGH */
+	    case STB_GLOBAL:
+	    success:
+	      /* Global definition.  Just what we need.  */
+	      result->s = sym;
+	      result->m = (struct link_map *) map;
+	      return 1;
+
+	    case STB_GNU_UNIQUE:;
+	      /* We have to determine whether we already found a
+		 symbol with this name before.  If not then we have to
+		 add it to the search table.  If we already found a
+		 definition we have to use it.  */
+	      void enter (struct unique_sym *table, size_t size,
+			  unsigned int hash, const char *name,
+			  const ElfW(Sym) *sym, const struct link_map *map)
+	      {
+		size_t idx = hash % size;
+		size_t hash2 = 1 + hash % (size - 2);
+		while (1)
+		  {
+		    if (table[idx].hashval == 0)
+		      {
+			table[idx].hashval = hash;
+			table[idx].name = strtab + sym->st_name;
+			if ((type_class & ELF_RTYPE_CLASS_COPY) != 0)
+			  {
+			    table[idx].sym = ref;
+			    table[idx].map = undef_map;
+			  }
+			else
+			  {
+			    table[idx].sym = sym;
+			    table[idx].map = map;
+			  }
+			return;
+		      }
+
+		    idx += hash2;
+		    if (idx >= size)
+		      idx -= size;
+		  }
+	      }
+
+	      struct unique_sym_table *tab
+		= &GL(dl_ns)[map->l_ns]._ns_unique_sym_table;
+
+	      __rtld_lock_lock_recursive (tab->lock);
+
+	      struct unique_sym *entries = tab->entries;
+	      size_t size = tab->size;
+	      if (entries != NULL)
+		{
+		  size_t idx = new_hash % size;
+		  size_t hash2 = 1 + new_hash % (size - 2);
+		  while (1)
+		    {
+		      if (entries[idx].hashval == new_hash
+			  && strcmp (entries[idx].name, undef_name) == 0)
+			{
+			  result->s = entries[idx].sym;
+			  result->m = (struct link_map *) entries[idx].map;
+			  __rtld_lock_unlock_recursive (tab->lock);
+			  return 1;
+			}
+
+		      if (entries[idx].hashval == 0
+			  && entries[idx].name == NULL)
+			break;
+
+		      idx += hash2;
+		      if (idx >= size)
+			idx -= size;
+		    }
+
+		  if (size * 3 <= tab->n_elements)
+		    {
+		      /* Expand the table.  */
+		      size_t newsize = _dl_higher_prime_number (size);
+		      struct unique_sym *newentries
+			= calloc (sizeof (struct unique_sym), newsize);
+		      if (newentries == NULL)
+			{
+			nomem:
+			  __rtld_lock_unlock_recursive (tab->lock);
+			  _dl_fatal_printf ("out of memory\n");
+			}
+
+		      for (idx = 0; idx < size; ++idx)
+			if (entries[idx].hashval != 0)
+			  enter (newentries, newsize, entries[idx].hashval,
+				 entries[idx].name, entries[idx].sym,
+				 entries[idx].map);
+
+		      tab->free (entries);
+		      tab->size = newsize;
+		      entries = tab->entries = newentries;
+		      tab->free = free;
+		    }
+		}
+	      else
+		{
+#define INITIAL_NUNIQUE_SYM_TABLE 31
+		  size = INITIAL_NUNIQUE_SYM_TABLE;
+		  entries = calloc (sizeof (struct unique_sym), size);
+		  if (entries == NULL)
+		    goto nomem;
+
+		  tab->entries = entries;
+		  tab->size = size;
+		  tab->free = free;
+		}
+
+	      enter (entries, size, new_hash, strtab + sym->st_name, sym, map);
+	      ++tab->n_elements;
+
+	      __rtld_lock_unlock_recursive (tab->lock);
+
+	      goto success;
+
+	    default:
+	      /* Local symbols are ignored.  */
+	      break;
+	    }
+	}
+
+      /* If this current map is the one mentioned in the verneed entry
+	 and we have not found a weak entry, it is a bug.  */
+      if (symidx == STN_UNDEF && version != NULL && version->filename != NULL
+	  && __builtin_expect (_dl_name_match_p (version->filename, map), 0))
+	return -1;
+    }
+  while (++i < n);
+
+  /* We have not found anything until now.  */
+  return 0;
+}
 
 
 static uint_fast32_t

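A note on the lookup fast path now inlined above: the bitmask probe is the GNU-hash bloom filter, testing two bits derived from the new-style hash in a single word before any bucket or chain is touched.  A rough standalone sketch of the hash and the probe for a 64-bit word size (helper names here are mine, not glibc's):

  #include <stdint.h>

  /* GNU-style ELF symbol hash, as computed by dl_new_hash ().  */
  static uint_fast32_t
  gnu_hash (const char *s)
  {
    uint_fast32_t h = 5381;
    for (unsigned char c = *s; c != '\0'; c = *++s)
      h = h * 33 + c;
    return h & 0xffffffff;
  }

  /* Bloom-filter probe mirroring the bitmask_word test in do_lookup_x;
     a zero result means the symbol definitely is not in this object.  */
  static int
  may_be_defined (const uint64_t *bitmask, uint64_t idxbits,
                  unsigned int shift, uint_fast32_t hash)
  {
    uint64_t word = bitmask[(hash / 64) & idxbits];
    unsigned int bit1 = hash & 63;
    unsigned int bit2 = (hash >> shift) & 63;
    return (word >> bit1) & (word >> bit2) & 1;
  }
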
Added: fsf/trunk/libc/elf/tst-audit4.c
==============================================================================
--- fsf/trunk/libc/elf/tst-audit4.c (added)
+++ fsf/trunk/libc/elf/tst-audit4.c Thu Jul 16 00:09:33 2009
@@ -1,0 +1,35 @@
+/* Test case for x86-64 preserved registers in dynamic linker.  */
+
+#ifdef __AVX__
+#include <stdlib.h>
+#include <string.h>
+#include <cpuid.h>
+#include <immintrin.h>
+
+extern __m256i audit_test (__m256i, __m256i, __m256i, __m256i,
+			   __m256i, __m256i, __m256i, __m256i);
+int
+main (void)
+{
+  unsigned int eax, ebx, ecx, edx;
+
+  /* Run AVX test only if AVX is supported.  */
+  if (__get_cpuid (1, &eax, &ebx, &ecx, &edx)
+      && (ecx & bit_AVX))
+    {
+      __m256i ymm = _mm256_setzero_si256 ();
+      __m256i ret = audit_test (ymm, ymm, ymm, ymm, ymm, ymm, ymm, ymm);
+
+      ymm =  _mm256_set1_epi32 (0x12349876);
+      if (memcmp (&ymm, &ret, sizeof (ret)))
+	abort ();
+    }
+  return 0;
+}
+#else
+int
+main (void)
+{
+  return 0;
+}
+#endif

Added: fsf/trunk/libc/elf/tst-audit5.c
==============================================================================
--- fsf/trunk/libc/elf/tst-audit5.c (added)
+++ fsf/trunk/libc/elf/tst-audit5.c Thu Jul 16 00:09:33 2009
@@ -1,0 +1,21 @@
+/* Test case for x86-64 preserved registers in dynamic linker.  */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <emmintrin.h>
+
+extern __m128i audit_test (__m128i, __m128i, __m128i, __m128i,
+			   __m128i, __m128i, __m128i, __m128i);
+int
+main (void)
+{
+  __m128i xmm = _mm_setzero_si128 ();
+  __m128i ret = audit_test (xmm, xmm, xmm, xmm, xmm, xmm, xmm, xmm);
+
+  xmm = _mm_set1_epi32 (0x12349876);
+  if (memcmp (&xmm, &ret, sizeof (ret)))
+    abort ();
+
+  return 0;
+}

Added: fsf/trunk/libc/elf/tst-auditmod4a.c
==============================================================================
--- fsf/trunk/libc/elf/tst-auditmod4a.c (added)
+++ fsf/trunk/libc/elf/tst-auditmod4a.c Thu Jul 16 00:09:33 2009
@@ -1,0 +1,48 @@
+/* Test case for x86-64 preserved registers in dynamic linker.  */
+
+#ifdef __AVX__
+#include <stdlib.h>
+#include <string.h>
+#include <immintrin.h>
+
+__m256i
+audit_test (__m256i x0, __m256i x1, __m256i x2, __m256i x3,
+	    __m256i x4, __m256i x5, __m256i x6, __m256i x7)
+{
+  __m256i ymm;
+
+  ymm = _mm256_set1_epi32 (1);
+  if (memcmp (&ymm, &x0, sizeof (ymm)))
+    abort ();
+
+  ymm = _mm256_set1_epi32 (2);
+  if (memcmp (&ymm, &x1, sizeof (ymm)))
+    abort ();
+
+  ymm = _mm256_set1_epi32 (3);
+  if (memcmp (&ymm, &x2, sizeof (ymm)))
+    abort ();
+
+  ymm = _mm256_set1_epi32 (4);
+  if (memcmp (&ymm, &x3, sizeof (ymm)))
+    abort ();
+
+  ymm = _mm256_set1_epi32 (5);
+  if (memcmp (&ymm, &x4, sizeof (ymm)))
+    abort ();
+
+  ymm = _mm256_set1_epi32 (6);
+  if (memcmp (&ymm, &x5, sizeof (ymm)))
+    abort ();
+
+  ymm = _mm256_set1_epi32 (7);
+  if (memcmp (&ymm, &x6, sizeof (ymm)))
+    abort ();
+
+  ymm = _mm256_set1_epi32 (8);
+  if (memcmp (&ymm, &x7, sizeof (ymm)))
+    abort ();
+
+  return _mm256_setzero_si256 ();
+}
+#endif

Added: fsf/trunk/libc/elf/tst-auditmod4b.c
==============================================================================
--- fsf/trunk/libc/elf/tst-auditmod4b.c (added)
+++ fsf/trunk/libc/elf/tst-auditmod4b.c Thu Jul 16 00:09:33 2009
@@ -1,0 +1,206 @@
+/* Verify that changing AVX registers in audit library won't affect
+   function parameter passing/return.  */
+
+#include <dlfcn.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <bits/wordsize.h>
+#include <gnu/lib-names.h>
+
+unsigned int
+la_version (unsigned int v)
+{
+  setlinebuf (stdout);
+
+  printf ("version: %u\n", v);
+
+  char buf[20];
+  sprintf (buf, "%u", v);
+
+  return v;
+}
+
+void
+la_activity (uintptr_t *cookie, unsigned int flag)
+{
+  if (flag == LA_ACT_CONSISTENT)
+    printf ("activity: consistent\n");
+  else if (flag == LA_ACT_ADD)
+    printf ("activity: add\n");
+  else if (flag == LA_ACT_DELETE)
+    printf ("activity: delete\n");
+  else
+    printf ("activity: unknown activity %u\n", flag);
+}
+
+char *
+la_objsearch (const char *name, uintptr_t *cookie, unsigned int flag)
+{
+  char buf[100];
+  const char *flagstr;
+  if (flag == LA_SER_ORIG)
+    flagstr = "LA_SER_ORIG";
+  else if (flag == LA_SER_LIBPATH)
+    flagstr = "LA_SER_LIBPATH";
+  else if (flag == LA_SER_RUNPATH)
+    flagstr = "LA_SER_RUNPATH";
+  else if (flag == LA_SER_CONFIG)
+    flagstr = "LA_SER_CONFIG";
+  else if (flag == LA_SER_DEFAULT)
+    flagstr = "LA_SER_DEFAULT";
+  else if (flag == LA_SER_SECURE)
+    flagstr = "LA_SER_SECURE";
+  else
+    {
+       sprintf (buf, "unknown flag %d", flag);
+       flagstr = buf;
+    }
+  printf ("objsearch: %s, %s\n", name, flagstr);
+
+  return (char *) name;
+}
+
+unsigned int
+la_objopen (struct link_map *l, Lmid_t lmid, uintptr_t *cookie)
+{
+  printf ("objopen: %ld, %s\n", lmid, l->l_name);
+
+  return 3;
+}
+
+void
+la_preinit (uintptr_t *cookie)
+{
+  printf ("preinit\n");
+}
+
+unsigned int
+la_objclose  (uintptr_t *cookie)
+{
+  printf ("objclose\n");
+  return 0;
+}
+
+uintptr_t
+la_symbind64 (Elf64_Sym *sym, unsigned int ndx, uintptr_t *refcook,
+	      uintptr_t *defcook, unsigned int *flags, const char *symname)
+{
+  printf ("symbind64: symname=%s, st_value=%#lx, ndx=%u, flags=%u\n",
+	  symname, (long int) sym->st_value, ndx, *flags);
+
+  return sym->st_value;
+}
+
+#define pltenter la_x86_64_gnu_pltenter
+#define pltexit la_x86_64_gnu_pltexit
+#define La_regs La_x86_64_regs
+#define La_retval La_x86_64_retval
+#define int_retval lrv_rax
+
+#include <tst-audit.h>
+
+#ifdef __AVX__
+#include <immintrin.h>
+#include <cpuid.h>
+
+static int avx = -1;
+
+static int
+__attribute ((always_inline))
+check_avx (void)
+{
+  if (avx == -1)
+    {
+      unsigned int eax, ebx, ecx, edx;
+
+      if (__get_cpuid (1, &eax, &ebx, &ecx, &edx)
+	  && (ecx & bit_AVX))
+	avx = 1;
+      else
+	avx = 0;
+    }
+  return avx;
+}
+#else
+#include <emmintrin.h>
+#endif
+
+ElfW(Addr)
+pltenter (ElfW(Sym) *sym, unsigned int ndx, uintptr_t *refcook,
+	  uintptr_t *defcook, La_regs *regs, unsigned int *flags,
+	  const char *symname, long int *framesizep)
+{
+  printf ("pltenter: symname=%s, st_value=%#lx, ndx=%u, flags=%u\n",
+	  symname, (long int) sym->st_value, ndx, *flags);
+
+#ifdef __AVX__
+  if (check_avx () && strcmp (symname, "audit_test") == 0)
+    {
+      __m256i zero = _mm256_setzero_si256 ();
+      if (memcmp (&regs->lr_vector[0], &zero, sizeof (zero))
+	  || memcmp (&regs->lr_vector[1], &zero, sizeof (zero))
+	  || memcmp (&regs->lr_vector[2], &zero, sizeof (zero))
+	  || memcmp (&regs->lr_vector[3], &zero, sizeof (zero))
+	  || memcmp (&regs->lr_vector[4], &zero, sizeof (zero))
+	  || memcmp (&regs->lr_vector[5], &zero, sizeof (zero))
+	  || memcmp (&regs->lr_vector[6], &zero, sizeof (zero))
+	  || memcmp (&regs->lr_vector[7], &zero, sizeof (zero)))
+	abort ();
+
+      for (int i = 0; i < 8; i++)
+	regs->lr_vector[i].ymm[0]
+	  = (La_x86_64_ymm) _mm256_set1_epi32 (i + 1);
+
+      __m256i ymm = _mm256_set1_epi32 (-1);
+      asm volatile ("vmovdqa %0, %%ymm0" : : "x" (ymm) : "xmm0" );
+      asm volatile ("vmovdqa %0, %%ymm1" : : "x" (ymm) : "xmm1" );
+      asm volatile ("vmovdqa %0, %%ymm2" : : "x" (ymm) : "xmm2" );
+      asm volatile ("vmovdqa %0, %%ymm3" : : "x" (ymm) : "xmm3" );
+      asm volatile ("vmovdqa %0, %%ymm4" : : "x" (ymm) : "xmm4" );
+      asm volatile ("vmovdqa %0, %%ymm5" : : "x" (ymm) : "xmm5" );
+      asm volatile ("vmovdqa %0, %%ymm6" : : "x" (ymm) : "xmm6" );
+      asm volatile ("vmovdqa %0, %%ymm7" : : "x" (ymm) : "xmm7" );
+
+      *framesizep = 1024;
+    }
+#endif
+
+  return sym->st_value;
+}
+
+unsigned int
+pltexit (ElfW(Sym) *sym, unsigned int ndx, uintptr_t *refcook,
+	 uintptr_t *defcook, const La_regs *inregs, La_retval *outregs,
+	 const char *symname)
+{
+  printf ("pltexit: symname=%s, st_value=%#lx, ndx=%u, retval=%tu\n",
+	  symname, (long int) sym->st_value, ndx, outregs->int_retval);
+
+#ifdef __AVX__
+  if (check_avx () && strcmp (symname, "audit_test") == 0)
+    {
+      __m256i zero = _mm256_setzero_si256 ();
+      if (memcmp (&outregs->lrv_vector0, &zero, sizeof (zero)))
+	abort ();
+
+      for (int i = 0; i < 8; i++)
+	{
+	  __m256i ymm = _mm256_set1_epi32 (i + 1);
+	  if (memcmp (&inregs->lr_vector[i], &ymm, sizeof (ymm)) != 0)
+	    abort ();
+	}
+
+      outregs->lrv_vector0.ymm[0]
+	= (La_x86_64_ymm) _mm256_set1_epi32 (0x12349876);
+
+      __m256i ymm = _mm256_set1_epi32 (-1);
+      asm volatile ("vmovdqa %0, %%ymm0" : : "x" (ymm) : "xmm0" );
+      asm volatile ("vmovdqa %0, %%ymm1" : : "x" (ymm) : "xmm1" );
+    }
+#endif
+
+  return 0;
+}

Added: fsf/trunk/libc/elf/tst-auditmod5a.c
==============================================================================
--- fsf/trunk/libc/elf/tst-auditmod5a.c (added)
+++ fsf/trunk/libc/elf/tst-auditmod5a.c Thu Jul 16 00:09:33 2009
@@ -1,0 +1,46 @@
+/* Test case for x86-64 preserved registers in dynamic linker.  */
+
+#include <stdlib.h>
+#include <string.h>
+#include <emmintrin.h>
+
+__m128i
+audit_test (__m128i x0, __m128i x1, __m128i x2, __m128i x3,
+	    __m128i x4, __m128i x5, __m128i x6, __m128i x7)
+{
+  __m128i xmm;
+
+  xmm =  _mm_set1_epi32 (1);
+  if (memcmp (&xmm, &x0, sizeof (xmm)))
+    abort ();
+
+  xmm =  _mm_set1_epi32 (2);
+  if (memcmp (&xmm, &x1, sizeof (xmm)))
+    abort ();
+
+  xmm =  _mm_set1_epi32 (3);
+  if (memcmp (&xmm, &x2, sizeof (xmm)))
+    abort ();
+
+  xmm =  _mm_set1_epi32 (4);
+  if (memcmp (&xmm, &x3, sizeof (xmm)))
+    abort ();
+
+  xmm =  _mm_set1_epi32 (5);
+  if (memcmp (&xmm, &x4, sizeof (xmm)))
+    abort ();
+
+  xmm =  _mm_set1_epi32 (6);
+  if (memcmp (&xmm, &x5, sizeof (xmm)))
+    abort ();
+
+  xmm =  _mm_set1_epi32 (7);
+  if (memcmp (&xmm, &x6, sizeof (xmm)))
+    abort ();
+
+  xmm =  _mm_set1_epi32 (8);
+  if (memcmp (&xmm, &x7, sizeof (xmm)))
+    abort ();
+
+  return _mm_setzero_si128 ();
+}

Added: fsf/trunk/libc/elf/tst-auditmod5b.c
==============================================================================
--- fsf/trunk/libc/elf/tst-auditmod5b.c (added)
+++ fsf/trunk/libc/elf/tst-auditmod5b.c Thu Jul 16 00:09:33 2009
@@ -1,0 +1,178 @@
+/* Verify that changing xmm registers in audit library won't affect
+   function parameter passing/return.  */
+
+#include <dlfcn.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <bits/wordsize.h>
+#include <gnu/lib-names.h>
+#include <emmintrin.h>
+
+unsigned int
+la_version (unsigned int v)
+{
+  setlinebuf (stdout);
+
+  printf ("version: %u\n", v);
+
+  char buf[20];
+  sprintf (buf, "%u", v);
+
+  return v;
+}
+
+void
+la_activity (uintptr_t *cookie, unsigned int flag)
+{
+  if (flag == LA_ACT_CONSISTENT)
+    printf ("activity: consistent\n");
+  else if (flag == LA_ACT_ADD)
+    printf ("activity: add\n");
+  else if (flag == LA_ACT_DELETE)
+    printf ("activity: delete\n");
+  else
+    printf ("activity: unknown activity %u\n", flag);
+}
+
+char *
+la_objsearch (const char *name, uintptr_t *cookie, unsigned int flag)
+{
+  char buf[100];
+  const char *flagstr;
+  if (flag == LA_SER_ORIG)
+    flagstr = "LA_SER_ORIG";
+  else if (flag == LA_SER_LIBPATH)
+    flagstr = "LA_SER_LIBPATH";
+  else if (flag == LA_SER_RUNPATH)
+    flagstr = "LA_SER_RUNPATH";
+  else if (flag == LA_SER_CONFIG)
+    flagstr = "LA_SER_CONFIG";
+  else if (flag == LA_SER_DEFAULT)
+    flagstr = "LA_SER_DEFAULT";
+  else if (flag == LA_SER_SECURE)
+    flagstr = "LA_SER_SECURE";
+  else
+    {
+       sprintf (buf, "unknown flag %d", flag);
+       flagstr = buf;
+    }
+  printf ("objsearch: %s, %s\n", name, flagstr);
+
+  return (char *) name;
+}
+
+unsigned int
+la_objopen (struct link_map *l, Lmid_t lmid, uintptr_t *cookie)
+{
+  printf ("objopen: %ld, %s\n", lmid, l->l_name);
+
+  return 3;
+}
+
+void
+la_preinit (uintptr_t *cookie)
+{
+  printf ("preinit\n");
+}
+
+unsigned int
+la_objclose  (uintptr_t *cookie)
+{
+  printf ("objclose\n");
+  return 0;
+}
+
+uintptr_t
+la_symbind64 (Elf64_Sym *sym, unsigned int ndx, uintptr_t *refcook,
+	      uintptr_t *defcook, unsigned int *flags, const char *symname)
+{
+  printf ("symbind64: symname=%s, st_value=%#lx, ndx=%u, flags=%u\n",
+	  symname, (long int) sym->st_value, ndx, *flags);
+
+  return sym->st_value;
+}
+
+#define pltenter la_x86_64_gnu_pltenter
+#define pltexit la_x86_64_gnu_pltexit
+#define La_regs La_x86_64_regs
+#define La_retval La_x86_64_retval
+#define int_retval lrv_rax
+
+#include <tst-audit.h>
+
+ElfW(Addr)
+pltenter (ElfW(Sym) *sym, unsigned int ndx, uintptr_t *refcook,
+	  uintptr_t *defcook, La_regs *regs, unsigned int *flags,
+	  const char *symname, long int *framesizep)
+{
+  printf ("pltenter: symname=%s, st_value=%#lx, ndx=%u, flags=%u\n",
+	  symname, (long int) sym->st_value, ndx, *flags);
+
+  __m128i minusone = _mm_set1_epi32 (-1);
+
+  if (strcmp (symname, "audit_test") == 0)
+    {
+      __m128i zero = _mm_setzero_si128 ();
+      if (memcmp (&regs->lr_xmm[0], &zero, sizeof (zero))
+	  || memcmp (&regs->lr_xmm[1], &zero, sizeof (zero))
+	  || memcmp (&regs->lr_xmm[2], &zero, sizeof (zero))
+	  || memcmp (&regs->lr_xmm[3], &zero, sizeof (zero))
+	  || memcmp (&regs->lr_xmm[4], &zero, sizeof (zero))
+	  || memcmp (&regs->lr_xmm[5], &zero, sizeof (zero))
+	  || memcmp (&regs->lr_xmm[6], &zero, sizeof (zero))
+	  || memcmp (&regs->lr_xmm[7], &zero, sizeof (zero)))
+	abort ();
+
+      for (int i = 0; i < 8; i++)
+	regs->lr_xmm[i] = (La_x86_64_xmm) _mm_set1_epi32 (i + 1);
+
+      *framesizep = 1024;
+    }
+
+  asm volatile ("movdqa %0, %%xmm0" : : "x" (minusone) : "xmm0" );
+  asm volatile ("movdqa %0, %%xmm1" : : "x" (minusone) : "xmm1" );
+  asm volatile ("movdqa %0, %%xmm2" : : "x" (minusone) : "xmm2" );
+  asm volatile ("movdqa %0, %%xmm3" : : "x" (minusone) : "xmm3" );
+  asm volatile ("movdqa %0, %%xmm4" : : "x" (minusone) : "xmm4" );
+  asm volatile ("movdqa %0, %%xmm5" : : "x" (minusone) : "xmm5" );
+  asm volatile ("movdqa %0, %%xmm6" : : "x" (minusone) : "xmm6" );
+  asm volatile ("movdqa %0, %%xmm7" : : "x" (minusone) : "xmm7" );
+
+  return sym->st_value;
+}
+
+unsigned int
+pltexit (ElfW(Sym) *sym, unsigned int ndx, uintptr_t *refcook,
+	 uintptr_t *defcook, const La_regs *inregs, La_retval *outregs,
+	 const char *symname)
+{
+  printf ("pltexit: symname=%s, st_value=%#lx, ndx=%u, retval=%tu\n",
+	  symname, (long int) sym->st_value, ndx, outregs->int_retval);
+
+  __m128i xmm;
+
+  if (strcmp (symname, "audit_test") == 0)
+    {
+      __m128i zero = _mm_setzero_si128 ();
+      if (memcmp (&outregs->lrv_xmm0, &zero, sizeof (zero)))
+	abort ();
+
+      for (int i = 0; i < 8; i++)
+	{
+	  xmm = _mm_set1_epi32 (i + 1);
+	  if (memcmp (&inregs->lr_xmm[i], &xmm, sizeof (xmm)) != 0)
+	    abort ();
+	}
+
+      outregs->lrv_xmm0 = (La_x86_64_xmm) _mm_set1_epi32 (0x12349876);
+    }
+
+  xmm = _mm_set1_epi32 (-1);
+  asm volatile ("movdqa %0, %%xmm0" : : "x" (xmm) : "xmm0" );
+  asm volatile ("movdqa %0, %%xmm1" : : "x" (xmm) : "xmm1" );
+
+  return 0;
+}

Modified: fsf/trunk/libc/sysdeps/x86_64/Makefile
==============================================================================
--- fsf/trunk/libc/sysdeps/x86_64/Makefile (original)
+++ fsf/trunk/libc/sysdeps/x86_64/Makefile Thu Jul 16 00:09:33 2009
@@ -4,6 +4,7 @@
 ifeq ($(subdir),csu)
 sysdep_routines += hp-timing
 elide-routines.os += hp-timing
+gen-as-const-headers += link-defines.sym
 endif
 
 ifeq ($(subdir),gmon)

Modified: fsf/trunk/libc/sysdeps/x86_64/bits/link.h
==============================================================================
--- fsf/trunk/libc/sysdeps/x86_64/bits/link.h (original)
+++ fsf/trunk/libc/sysdeps/x86_64/bits/link.h Thu Jul 16 00:09:33 2009
@@ -65,9 +65,18 @@
 /* Registers for entry into PLT on x86-64.  */
 # if __GNUC_PREREQ (4,0)
 typedef float La_x86_64_xmm __attribute__ ((__vector_size__ (16)));
+typedef float La_x86_64_ymm __attribute__ ((__vector_size__ (32)));
 # else
 typedef float La_x86_64_xmm __attribute__ ((__mode__ (__V4SF__)));
 # endif
+
+typedef union
+{
+# if __GNUC_PREREQ (4,0)
+  La_x86_64_ymm ymm[2];
+# endif
+  La_x86_64_xmm xmm[4];
+} La_x86_64_vector __attribute__ ((aligned(16)));
 
 typedef struct La_x86_64_regs
 {
@@ -80,6 +89,7 @@
   uint64_t lr_rbp;
   uint64_t lr_rsp;
   La_x86_64_xmm lr_xmm[8];
+  La_x86_64_vector lr_vector[8];
 } La_x86_64_regs;
 
 /* Return values for calls from PLT on x86-64.  */
@@ -91,6 +101,8 @@
   La_x86_64_xmm lrv_xmm1;
   long double lrv_st0;
   long double lrv_st1;
+  La_x86_64_vector lrv_vector0;
+  La_x86_64_vector lrv_vector1;
 } La_x86_64_retval;
 
 

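The new La_x86_64_vector slot is a union so that one save area serves both views: an SSE-only consumer sees xmm[] images while an AVX-aware one reads and writes ymm[].  Note that _dl_runtime_profile only realigns %rsp to 16 bytes, so the lr_vector slots cannot be assumed 32-byte aligned at run time; that is why the trampoline below uses unaligned vmovdqu for the YMM saves while the XMM saves stay on movaps/vmovdqa.  A quick sanity check of the layout (illustrative only; assumes the updated <link.h> and GCC >= 4.0 so the ymm member exists):

  #define _GNU_SOURCE
  #include <assert.h>
  #include <link.h>

  int
  main (void)
  {
    /* Four XMM-sized views alias two YMM-sized views of the same bytes.  */
    assert (sizeof (La_x86_64_xmm) == 16);
    assert (sizeof (La_x86_64_vector) == 4 * sizeof (La_x86_64_xmm));
    return 0;
  }
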
Modified: fsf/trunk/libc/sysdeps/x86_64/dl-trampoline.S
==============================================================================
--- fsf/trunk/libc/sysdeps/x86_64/dl-trampoline.S (original)
+++ fsf/trunk/libc/sysdeps/x86_64/dl-trampoline.S Thu Jul 16 00:09:33 2009
@@ -17,7 +17,9 @@
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
    02111-1307 USA.  */
 
+#include <config.h>
 #include <sysdep.h>
+#include <link-defines.h>
 
 	.text
 	.globl _dl_runtime_resolve
@@ -89,26 +91,85 @@
 
 	/* Actively align the La_x86_64_regs structure.  */
 	andq $0xfffffffffffffff0, %rsp
-	subq $192, %rsp		# sizeof(La_x86_64_regs)
+# ifdef HAVE_AVX_SUPPORT
+	/* sizeof(La_x86_64_regs).  Need extra space for 8 SSE registers
+	   to detect if any xmm0-xmm7 registers are changed by audit
+	   module.  */
+	subq $(LR_SIZE + XMM_SIZE*8), %rsp
+# else
+	subq $LR_SIZE, %rsp		# sizeof(La_x86_64_regs)
+# endif
 	movq %rsp, 24(%rbx)
 
-	movq %rdx,   (%rsp)	# Fill the La_x86_64_regs structure.
-	movq %r8,   8(%rsp)
-	movq %r9,  16(%rsp)
-	movq %rcx, 24(%rsp)
-	movq %rsi, 32(%rsp)
-	movq %rdi, 40(%rsp)
-	movq %rbp, 48(%rsp)
+	/* Fill the La_x86_64_regs structure.  */
+	movq %rdx, LR_RDX_OFFSET(%rsp)
+	movq %r8,  LR_R8_OFFSET(%rsp)
+	movq %r9,  LR_R9_OFFSET(%rsp)
+	movq %rcx, LR_RCX_OFFSET(%rsp)
+	movq %rsi, LR_RSI_OFFSET(%rsp)
+	movq %rdi, LR_RDI_OFFSET(%rsp)
+	movq %rbp, LR_RBP_OFFSET(%rsp)
+
 	leaq 48(%rbx), %rax
-	movq %rax, 56(%rsp)
-	movaps %xmm0,  64(%rsp)
-	movaps %xmm1,  80(%rsp)
-	movaps %xmm2,  96(%rsp)
-	movaps %xmm3, 112(%rsp)
-	movaps %xmm4, 128(%rsp)
-	movaps %xmm5, 144(%rsp)
-	movaps %xmm6, 160(%rsp)
-	movaps %xmm7, 176(%rsp)
+	movq %rax, LR_RSP_OFFSET(%rsp)
+
+	/* We always store the XMM registers even if AVX is available.
+	   This is to provide backward binary compatibility for existing
+	   audit modules.  */
+	movaps %xmm0,		   (LR_XMM_OFFSET)(%rsp)
+	movaps %xmm1, (LR_XMM_OFFSET +   XMM_SIZE)(%rsp)
+	movaps %xmm2, (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp)
+	movaps %xmm3, (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp)
+	movaps %xmm4, (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp)
+	movaps %xmm5, (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp)
+	movaps %xmm6, (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp)
+	movaps %xmm7, (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp)
+
+# ifdef HAVE_AVX_SUPPORT
+	.data
+L(have_avx):
+	.zero 4
+	.size L(have_avx), 4
+	.previous
+
+	cmpl	$0, L(have_avx)(%rip)
+	jne	1f
+	movq	%rbx, %r11		# Save rbx
+	movl	$1, %eax
+	cpuid
+	movq	%r11,%rbx		# Restore rbx
+	movl	$1, %eax
+	testl	$(1 << 28), %ecx
+	jne	2f
+	negl	%eax
+2:	movl	%eax, L(have_avx)(%rip)
+	cmpl	$0, %eax
+
+1:	js	L(no_avx1)
+
+	/* This is to support AVX audit modules.  */
+	vmovdqu %ymm0,		      (LR_VECTOR_OFFSET)(%rsp)
+	vmovdqu %ymm1, (LR_VECTOR_OFFSET +   VECTOR_SIZE)(%rsp)
+	vmovdqu %ymm2, (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
+	vmovdqu %ymm3, (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
+	vmovdqu %ymm4, (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
+	vmovdqu %ymm5, (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
+	vmovdqu %ymm6, (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp)
+	vmovdqu %ymm7, (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp)
+
+	/* Save xmm0-xmm7 registers to detect if any of them are
+	   changed by audit module.  */
+	vmovdqa %xmm0,		    (LR_SIZE)(%rsp)
+	vmovdqa %xmm1, (LR_SIZE +   XMM_SIZE)(%rsp)
+	vmovdqa %xmm2, (LR_SIZE + XMM_SIZE*2)(%rsp)
+	vmovdqa %xmm3, (LR_SIZE + XMM_SIZE*3)(%rsp)
+	vmovdqa %xmm4, (LR_SIZE + XMM_SIZE*4)(%rsp)
+	vmovdqa %xmm5, (LR_SIZE + XMM_SIZE*5)(%rsp)
+	vmovdqa %xmm6, (LR_SIZE + XMM_SIZE*6)(%rsp)
+	vmovdqa %xmm7, (LR_SIZE + XMM_SIZE*7)(%rsp)
+
+L(no_avx1):
+# endif
 
 	movq %rsp, %rcx		# La_x86_64_regs pointer to %rcx.
 	movq 48(%rbx), %rdx	# Load return address if needed.
@@ -120,28 +181,95 @@
 	movq %rax, %r11		# Save return value.
 
 	movq 8(%rbx), %rax	# Get back register content.
-	movq      (%rsp), %rdx
-	movq     8(%rsp), %r8
-	movq    16(%rsp), %r9
-	movaps  64(%rsp), %xmm0
-	movaps  80(%rsp), %xmm1
-	movaps  96(%rsp), %xmm2
-	movaps 112(%rsp), %xmm3
-	movaps 128(%rsp), %xmm4
-	movaps 144(%rsp), %xmm5
-	movaps 160(%rsp), %xmm6
-	movaps 176(%rsp), %xmm7
-
-	movq 16(%rbx), %r10	# Anything in framesize?
+	movq LR_RDX_OFFSET(%rsp), %rdx
+	movq  LR_R8_OFFSET(%rsp), %r8
+	movq  LR_R9_OFFSET(%rsp), %r9
+
+# ifdef HAVE_AVX_SUPPORT
+	cmpl	$0, L(have_avx)(%rip)
+	js	L(no_avx2)
+
+	/* Check if any xmm0-xmm7 registers are changed by audit
+	   module.  */
+	vmovdqa (LR_XMM_OFFSET)(%rsp), %xmm0
+	vpcmpeqq (LR_SIZE)(%rsp), %xmm0, %xmm1
+	vpmovmskb %xmm1, %esi
+	cmpl $0xffff, %esi
+	je 1f
+	vmovdqu			(LR_VECTOR_OFFSET)(%rsp), %ymm0
+
+1:	vmovdqa (LR_XMM_OFFSET + XMM_SIZE)(%rsp), %xmm1
+	vpcmpeqq (LR_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm2
+	vpmovmskb %xmm2, %esi
+	cmpl $0xffff, %esi
+	je 1f
+	vmovdqu	  (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp), %ymm1
+
+1:	vmovdqa (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp), %xmm2
+	vpcmpeqq (LR_SIZE + XMM_SIZE*2)(%rsp), %xmm2, %xmm3
+	vpmovmskb %xmm3, %esi
+	cmpl $0xffff, %esi
+	je 1f
+	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp), %ymm2
+
+1:	vmovdqa (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp), %xmm3
+	vpcmpeqq (LR_SIZE + XMM_SIZE*3)(%rsp), %xmm3, %xmm4
+	vpmovmskb %xmm4, %esi
+	cmpl $0xffff, %esi
+	je 1f
+	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp), %ymm3
+
+1:	vmovdqa (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp), %xmm4
+	vpcmpeqq (LR_SIZE + XMM_SIZE*4)(%rsp), %xmm4, %xmm5
+	vpmovmskb %xmm5, %esi
+	cmpl $0xffff, %esi
+	je 1f
+	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp), %ymm4
+
+1:	vmovdqa (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp), %xmm5
+	vpcmpeqq (LR_SIZE + XMM_SIZE*5)(%rsp), %xmm5, %xmm6
+	vpmovmskb %xmm6, %esi
+	cmpl $0xffff, %esi
+	je 1f
+	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp), %ymm5
+
+1:	vmovdqa (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp), %xmm6
+	vpcmpeqq (LR_SIZE + XMM_SIZE*6)(%rsp), %xmm6, %xmm7
+	vpmovmskb %xmm7, %esi
+	cmpl $0xffff, %esi
+	je 1f
+	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp), %ymm6
+
+1:	vmovdqa (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp), %xmm7
+	vpcmpeqq (LR_SIZE + XMM_SIZE*7)(%rsp), %xmm7, %xmm8
+	vpmovmskb %xmm8, %esi
+	cmpl $0xffff, %esi
+	je 1f
+	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp), %ymm7
+	jmp	1f
+
+L(no_avx2):
+# endif
+	movaps		    (LR_XMM_OFFSET)(%rsp), %xmm0
+	movaps	 (LR_XMM_OFFSET + XMM_SIZE)(%rsp), %xmm1
+	movaps (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp), %xmm2
+	movaps (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp), %xmm3
+	movaps (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp), %xmm4
+	movaps (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp), %xmm5
+	movaps (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp), %xmm6
+	movaps (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp), %xmm7
+
+1:	movq 16(%rbx), %r10	# Anything in framesize?
 	testq %r10, %r10
-	jns 1f
+	jns 3f
 
 	/* There's nothing in the frame size, so there
 	   will be no call to the _dl_call_pltexit. */
 
-	movq 24(%rsp), %rcx	# Get back registers content.
-	movq 32(%rsp), %rsi
-	movq 40(%rsp), %rdi
+	/* Get back registers content.  */
+	movq LR_RCX_OFFSET(%rsp), %rcx
+	movq LR_RSI_OFFSET(%rsp), %rsi
+	movq LR_RDI_OFFSET(%rsp), %rdi
 
 	movq %rbx, %rsp
 	movq (%rsp), %rbx
@@ -153,7 +281,7 @@
 	cfi_adjust_cfa_offset(-48)
 	jmp *%r11		# Jump to function address.
 
-1:
+3:
 	cfi_adjust_cfa_offset(48)
 	cfi_rel_offset(%rbx, 0)
 	cfi_def_cfa_register(%rbx)
@@ -163,7 +291,7 @@
 	   temporary buffer of the size specified by the 'framesize'
 	   returned from _dl_profile_fixup */
 
-	leaq 56(%rbx), %rsi	# stack
+	leaq LR_RSP_OFFSET(%rbx), %rsi	# stack
 	addq $8, %r10
 	andq $0xfffffffffffffff0, %r10
 	movq %r10, %rcx
@@ -185,31 +313,81 @@
 	   _dl_call_pltexit.  The La_x86_64_regs is being pointed by rsp now,
 	   so we just need to allocate the sizeof(La_x86_64_retval) space on
 	   the stack, since the alignment has already been taken care of. */
-
-	subq $80, %rsp		# sizeof(La_x86_64_retval)
+# ifdef HAVE_AVX_SUPPORT
+	/* sizeof(La_x86_64_retval).  Need extra space for 2 SSE
+	   registers to detect if xmm0/xmm1 registers are changed
+	   by audit module.  */
+	subq $(LRV_SIZE + XMM_SIZE*2), %rsp
+# else
+	subq $LRV_SIZE, %rsp	# sizeof(La_x86_64_retval)
+# endif
 	movq %rsp, %rcx		# La_x86_64_retval argument to %rcx.
 
-	movq %rax, (%rcx)	# Fill in the La_x86_64_retval structure.
-	movq %rdx, 8(%rcx)
-	movaps %xmm0, 16(%rcx)
-	movaps %xmm1, 32(%rcx)
-	fstpt 48(%rcx)
-	fstpt 64(%rcx)
+	/* Fill in the La_x86_64_retval structure.  */
+	movq %rax, LRV_RAX_OFFSET(%rcx)
+	movq %rdx, LRV_RDX_OFFSET(%rcx)
+
+	movaps %xmm0, LRV_XMM0_OFFSET(%rcx)
+	movaps %xmm1, LRV_XMM1_OFFSET(%rcx)
+
+# ifdef HAVE_AVX_SUPPORT
+	cmpl	$0, L(have_avx)(%rip)
+	js	L(no_avx3)
+
+	/* This is to support AVX audit modules.  */
+	vmovdqu %ymm0, LRV_VECTOR0_OFFSET(%rcx)
+	vmovdqu %ymm1, LRV_VECTOR1_OFFSET(%rcx)
+
+	/* Save xmm0/xmm1 registers to detect if they are changed
+	   by audit module.  */
+	vmovdqa %xmm0,		  (LRV_SIZE)(%rcx)
+	vmovdqa %xmm1, (LRV_SIZE + XMM_SIZE)(%rcx)
+
+L(no_avx3):
+# endif
+
+	fstpt LRV_ST0_OFFSET(%rcx)
+	fstpt LRV_ST1_OFFSET(%rcx)
 
 	movq 24(%rbx), %rdx	# La_x86_64_regs argument to %rdx.
 	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
         movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
 	call _dl_call_pltexit
 
-	movq  (%rsp), %rax	# Restore return registers.
-	movq 8(%rsp), %rdx
-	movaps 16(%rsp), %xmm0
-	movaps 32(%rsp), %xmm1
-	fldt 64(%rsp)
-	fldt 48(%rsp)
+	/* Restore return registers.  */
+	movq LRV_RAX_OFFSET(%rsp), %rax
+	movq LRV_RDX_OFFSET(%rsp), %rdx
+
+# ifdef HAVE_AVX_SUPPORT
+	cmpl	$0, L(have_avx)(%rip)
+	js	L(no_avx4)
+
+	/* Check if xmm0/xmm1 registers are changed by audit module.  */
+	vmovdqa LRV_XMM0_OFFSET(%rsp), %xmm0
+	vpcmpeqq (LRV_SIZE)(%rsp), %xmm0, %xmm1
+	vpmovmskb %xmm1, %esi
+	cmpl $0xffff, %esi
+	je 1f
+	vmovdqu LRV_VECTOR0_OFFSET(%rsp), %ymm0
+
+1:	vmovdqa LRV_XMM1_OFFSET(%rsp), %xmm1
+	vpcmpeqq (LRV_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm2
+	vpmovmskb %xmm2, %esi
+	cmpl $0xffff, %esi
+	je 1f
+	vmovdqu LRV_VECTOR1_OFFSET(%rsp), %ymm1
+	jmp 1f
+
+L(no_avx4):
+# endif
+	movaps LRV_XMM0_OFFSET(%rsp), %xmm0
+	movaps LRV_XMM1_OFFSET(%rsp), %xmm1
+
+1:	fldt LRV_ST1_OFFSET(%rsp)
+	fldt LRV_ST0_OFFSET(%rsp)
 
 	movq %rbx, %rsp
-	movq  (%rsp), %rbx
+	movq (%rsp), %rbx
 	cfi_restore(rbx)
 	cfi_def_cfa_register(%rsp)
 

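On the way back out of the audit hooks, the trampoline has two candidate values for each vector register: the 128-bit image in lr_xmm (which a pre-AVX audit module may have modified) and the 256-bit image in lr_vector (which an AVX-aware module may have modified).  The vpcmpeqq/vpmovmskb sequences above implement the tie-break: if lr_xmm still equals the private copy stashed beyond LR_SIZE, the module did not touch the XMM view and the full YMM value is restored from lr_vector; otherwise the modified XMM value wins.  In rough C terms (a sketch only, the helper name is mine):

  #include <string.h>

  /* Nonzero if the audit module left this 16-byte XMM image untouched,
     i.e. the copy saved before the call still matches what is in lr_xmm
     now, so the full 32-byte value from lr_vector should be restored.  */
  static int
  xmm_untouched (const void *lr_xmm_now, const void *saved_before)
  {
    return memcmp (lr_xmm_now, saved_before, 16) == 0;
  }
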
Modified: fsf/trunk/libc/sysdeps/x86_64/elf/configure
==============================================================================
--- fsf/trunk/libc/sysdeps/x86_64/elf/configure (original)
+++ fsf/trunk/libc/sysdeps/x86_64/elf/configure Thu Jul 16 00:09:33 2009
@@ -79,3 +79,28 @@
 #define PI_STATIC_AND_HIDDEN 1
 _ACEOF
 
+
+{ $as_echo "$as_me:$LINENO: checking for AVX support" >&5
+$as_echo_n "checking for AVX support... " >&6; }
+if test "${libc_cv_cc_avx+set}" = set; then
+  $as_echo_n "(cached) " >&6
+else
+  if { ac_try='${CC-cc} -mavx -xc /dev/null -S -o /dev/null'
+  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+  (eval $ac_try) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+  (exit $ac_status); }; }; then
+  libc_cv_cc_avx=yes
+else
+  libc_cv_cc_avx=no
+fi
+fi
+{ $as_echo "$as_me:$LINENO: result: $libc_cv_cc_avx" >&5
+$as_echo "$libc_cv_cc_avx" >&6; }
+if test $libc_cv_cc_avx = yes; then
+  cat >>confdefs.h <<\_ACEOF
+#define HAVE_AVX_SUPPORT 1
+_ACEOF
+
+fi

Modified: fsf/trunk/libc/sysdeps/x86_64/elf/configure.in
==============================================================================
--- fsf/trunk/libc/sysdeps/x86_64/elf/configure.in (original)
+++ fsf/trunk/libc/sysdeps/x86_64/elf/configure.in Thu Jul 16 00:09:33 2009
@@ -32,3 +32,14 @@
 dnl It is always possible to access static and hidden symbols in an
 dnl position independent way.
 AC_DEFINE(PI_STATIC_AND_HIDDEN)
+
+dnl Check if -mavx works.
+AC_CACHE_CHECK(for AVX support, libc_cv_cc_avx, [dnl
+if AC_TRY_COMMAND([${CC-cc} -mavx -xc /dev/null -S -o /dev/null]); then
+  libc_cv_cc_avx=yes
+else
+  libc_cv_cc_avx=no
+fi])
+if test $libc_cv_cc_avx = yes; then
+  AC_DEFINE(HAVE_AVX_SUPPORT)
+fi

Added: fsf/trunk/libc/sysdeps/x86_64/link-defines.sym
==============================================================================
--- fsf/trunk/libc/sysdeps/x86_64/link-defines.sym (added)
+++ fsf/trunk/libc/sysdeps/x86_64/link-defines.sym Thu Jul 16 00:09:33 2009
@@ -1,0 +1,28 @@
+#include "link.h"
+#include <stddef.h>
+
+--
+VECTOR_SIZE		sizeof (La_x86_64_vector)
+XMM_SIZE		sizeof (La_x86_64_xmm)
+
+LR_SIZE			sizeof (struct La_x86_64_regs)
+LR_RDX_OFFSET		offsetof (struct La_x86_64_regs, lr_rdx)
+LR_R8_OFFSET		offsetof (struct La_x86_64_regs, lr_r8)
+LR_R9_OFFSET		offsetof (struct La_x86_64_regs, lr_r9)
+LR_RCX_OFFSET		offsetof (struct La_x86_64_regs, lr_rcx)
+LR_RSI_OFFSET		offsetof (struct La_x86_64_regs, lr_rsi)
+LR_RDI_OFFSET		offsetof (struct La_x86_64_regs, lr_rdi)
+LR_RBP_OFFSET		offsetof (struct La_x86_64_regs, lr_rbp)
+LR_RSP_OFFSET		offsetof (struct La_x86_64_regs, lr_rsp)
+LR_XMM_OFFSET		offsetof (struct La_x86_64_regs, lr_xmm)
+LR_VECTOR_OFFSET	offsetof (struct La_x86_64_regs, lr_vector)
+
+LRV_SIZE		sizeof (struct La_x86_64_retval)
+LRV_RAX_OFFSET		offsetof (struct La_x86_64_retval, lrv_rax)
+LRV_RDX_OFFSET		offsetof (struct La_x86_64_retval, lrv_rdx)
+LRV_XMM0_OFFSET		offsetof (struct La_x86_64_retval, lrv_xmm0)
+LRV_XMM1_OFFSET		offsetof (struct La_x86_64_retval, lrv_xmm1)
+LRV_ST0_OFFSET		offsetof (struct La_x86_64_retval, lrv_st0)
+LRV_ST1_OFFSET		offsetof (struct La_x86_64_retval, lrv_st1)
+LRV_VECTOR0_OFFSET	offsetof (struct La_x86_64_retval, lrv_vector0)
+LRV_VECTOR1_OFFSET	offsetof (struct La_x86_64_retval, lrv_vector1)
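
link-defines.sym is listed in gen-as-const-headers (see the sysdeps/x86_64/Makefile hunk above), so the build turns it into a link-defines.h of assembler-visible constants; the values are simply the sizeof/offsetof results for the structures in <bits/link.h>, which is what lets dl-trampoline.S drop its hard-coded 192- and 80-byte frame sizes.  A small host-side program printing the same quantities (illustrative only, not part of the tree):

  #define _GNU_SOURCE
  #include <link.h>
  #include <stddef.h>
  #include <stdio.h>

  int
  main (void)
  {
    printf ("LR_SIZE          = %zu\n", sizeof (struct La_x86_64_regs));
    printf ("LR_XMM_OFFSET    = %zu\n",
            offsetof (struct La_x86_64_regs, lr_xmm));
    printf ("LR_VECTOR_OFFSET = %zu\n",
            offsetof (struct La_x86_64_regs, lr_vector));
    printf ("LRV_SIZE         = %zu\n", sizeof (struct La_x86_64_retval));
    return 0;
  }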