
[Commits] r24926 - in /fsf/trunk/libc: ./ malloc/ sysdeps/ieee754/ldbl-128ibm/



Author: eglibc
Date: Fri Jan  3 00:01:54 2014
New Revision: 24926

Log:
Import glibc-mainline for 2014-01-03

Modified:
    fsf/trunk/libc/ChangeLog
    fsf/trunk/libc/NEWS
    fsf/trunk/libc/malloc/arena.c
    fsf/trunk/libc/malloc/hooks.c
    fsf/trunk/libc/malloc/malloc.c
    fsf/trunk/libc/malloc/malloc.h
    fsf/trunk/libc/malloc/mallocbug.c
    fsf/trunk/libc/malloc/mcheck.c
    fsf/trunk/libc/malloc/mcheck.h
    fsf/trunk/libc/malloc/memusage.c
    fsf/trunk/libc/malloc/memusagestat.c
    fsf/trunk/libc/malloc/morecore.c
    fsf/trunk/libc/malloc/mtrace.c
    fsf/trunk/libc/malloc/obstack.c
    fsf/trunk/libc/malloc/obstack.h
    fsf/trunk/libc/malloc/set-freeres.c
    fsf/trunk/libc/malloc/tst-mallocstate.c
    fsf/trunk/libc/malloc/tst-mtrace.c
    fsf/trunk/libc/malloc/tst-realloc.c
    fsf/trunk/libc/sysdeps/ieee754/ldbl-128ibm/e_acoshl.c
    fsf/trunk/libc/sysdeps/ieee754/ldbl-128ibm/e_logl.c
    fsf/trunk/libc/sysdeps/ieee754/ldbl-128ibm/s_asinhl.c

Modified: fsf/trunk/libc/ChangeLog
==============================================================================
--- fsf/trunk/libc/ChangeLog (original)
+++ fsf/trunk/libc/ChangeLog Fri Jan  3 00:01:54 2014
@@ -1,3 +1,52 @@
+2014-01-02  Joseph Myers  <joseph@xxxxxxxxxxxxxxxx>
+
+	[BZ #16386]
+	* sysdeps/ieee754/ldbl-128ibm/e_logl.c (__ieee754_logl): Adjust
+	numbers with subnormal high part when calculating exponent.
+
+	[BZ #16385]
+	* sysdeps/ieee754/ldbl-128ibm/s_asinhl.c (__asinhl): Use fabsl not
+	fabs.
+
+	[BZ #16384]
+	* sysdeps/ieee754/ldbl-128ibm/e_acoshl.c (ln2): Initialize with
+	M_LN2l.
+	(__ieee754_acoshl): Use __log1pl not __log1p.
+
+2013-01-02  Ondřej Bílka  <neleai@xxxxxxxxx>
+
+	* malloc/arena.c (malloc_atfork, free_atfork, ptmalloc_lock_all,
+	ptmalloc_unlock_all, ptmalloc_unlock_all2, next_env_entry,
+	__failing_morecore, ptmalloc_init, dump_heap, new_heap, grow_heap,
+	heap_trim, _int_new_arena, get_free_list, reused_arena, arena_get2):
+	Convert to GNU style.
+	* malloc/hooks.c (memalign_hook_ini, __malloc_check_init,
+	mem2mem_check, mem2chunk_check, top_check, realloc_check,
+	memalign_check, __malloc_set_state): Likewise.
+	* malloc/mallocbug.c (main): Likewise.
+	* malloc/malloc.c (__malloc_assert, malloc_init_state, free_perturb,
+	do_check_malloced_chunk, do_check_malloc_state, sysmalloc, systrim,
+	mremap_chunk, __libc_malloc, __libc_free, __libc_realloc, _mid_memalign,
+	_int_malloc, malloc_consolidate, _int_realloc, _int_memalign, mtrim,
+	musable, __libc_mallopt, __posix_memalign, malloc_info): Likewise.
+	* malloc/malloc.h: Likewise.
+	* malloc/mcheck.c (checkhdr, unlink_blk, link_blk, freehook, mallochook,
+	memalignhook, reallochook, mabort): Likewise.
+	* malloc/mcheck.h: Likewise.
+	* malloc/memusage.c (update_data, me, malloc, realloc, calloc, free, mmap,
+	mmap64, mremap, munmap, dest): Likewise.
+	* malloc/memusagestat.c (main, parse_opt, more_help): Likewise.
+	* malloc/morecore.c (__default_morecore): Likewise.
+	* malloc/mtrace.c (tr_break, lock_and_info, mtrace): Likewise.
+	* malloc/obstack.c (_obstack_begin, _obstack_newchunk,
+	_obstack_allocated_p, obstack_free, _obstack_memory_used,
+	print_and_abort): Likewise.
+	* malloc/obstack.h: Likewise.
+	* malloc/set-freeres.c (__libc_freeres): Likewise.
+	* malloc/tst-mallocstate.c (main): Likewise.
+	* malloc/tst-mtrace.c (main): Likewise.
+	* malloc/tst-realloc.c (do_test): Likewise.
+
 2013-01-02  Siddhesh Poyarekar  <siddhesh@xxxxxxxxxx>
 
 	[BZ #16366]

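As context for the BZ #16384 and BZ #16385 entries above: both fixes address the same class of defect in the ldbl-128ibm (IBM double-double) code, where a double-precision function or constant (fabs, __log1p, a plain double literal for ln 2) was used inside a long double routine, silently truncating intermediate results to double precision. The standalone sketch below is not part of this commit; it only illustrates the effect of calling fabs instead of fabsl, on a target where long double is wider than double (such as powerpc with ldbl-128ibm).

    #include <math.h>
    #include <stdio.h>

    int
    main (void)
    {
      /* A value whose magnitude cannot be represented exactly in double
         (it needs 61 significand bits), but can be in a wider long double.  */
      long double x = -1.0L - 0x1p-60L;

      /* fabs takes a double: x is first converted to double, which rounds
         it to -1.0, so the 2^-60 part is lost before the absolute value is
         taken.  This mirrors the bug class fixed in s_asinhl.c.  */
      long double lossy = fabs (x);

      /* fabsl operates on the full long double value.  */
      long double exact = fabsl (x);

      printf ("fabs  (x) - 1 = %La\n", lossy - 1.0L);  /* 0x0p+0: low part lost */
      printf ("fabsl (x) - 1 = %La\n", exact - 1.0L);  /* nonzero: 2^-60 kept */
      return 0;
    }

Built with e.g. gcc -std=c99 and linked with -lm, the fabs line prints zero while the fabsl line prints a nonzero value (0x1p-60 with glibc's printf), showing the low-order part that the pre-fix code discarded.
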
Modified: fsf/trunk/libc/NEWS
==============================================================================
--- fsf/trunk/libc/NEWS (original)
+++ fsf/trunk/libc/NEWS Fri Jan  3 00:01:54 2014
@@ -23,7 +23,8 @@
   16038, 16041, 16055, 16071, 16072, 16074, 16077, 16078, 16103, 16112,
   16143, 16144, 16146, 16150, 16151, 16153, 16167, 16172, 16195, 16214,
   16245, 16271, 16274, 16283, 16289, 16293, 16314, 16316, 16330, 16337,
-  16338, 16356, 16365, 16366, 16369, 16372, 16375, 16379.
+  16338, 16356, 16365, 16366, 16369, 16372, 16375, 16379, 16384, 16385,
+  16386.
 
 * Slovenian translations for glibc messages have been contributed by the
   Translation Project's Slovenian team of translators.

Modified: fsf/trunk/libc/malloc/arena.c
==============================================================================
--- fsf/trunk/libc/malloc/arena.c (original)
+++ fsf/trunk/libc/malloc/arena.c Fri Jan  3 00:01:54 2014
@@ -21,12 +21,12 @@
 
 /* Compile-time constants.  */
 
-#define HEAP_MIN_SIZE (32*1024)
+#define HEAP_MIN_SIZE (32 * 1024)
 #ifndef HEAP_MAX_SIZE
 # ifdef DEFAULT_MMAP_THRESHOLD_MAX
 #  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
 # else
-#  define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
+#  define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
 # endif
 #endif
 
@@ -39,7 +39,7 @@
 
 
 #ifndef THREAD_STATS
-#define THREAD_STATS 0
+# define THREAD_STATS 0
 #endif
 
 /* If THREAD_STATS is non-zero, some statistics on mutex locking are
@@ -53,12 +53,13 @@
    malloc_chunks.  It is allocated with mmap() and always starts at an
    address aligned to HEAP_MAX_SIZE.  */
 
-typedef struct _heap_info {
+typedef struct _heap_info
+{
   mstate ar_ptr; /* Arena for this heap. */
   struct _heap_info *prev; /* Previous heap. */
   size_t size;   /* Current size in bytes. */
-  size_t mprotect_size;	/* Size in bytes that has been mprotected
-			   PROT_READ|PROT_WRITE.  */
+  size_t mprotect_size; /* Size in bytes that has been mprotected
+                           PROT_READ|PROT_WRITE.  */
   /* Make sure the following data is properly aligned, particularly
      that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
      MALLOC_ALIGNMENT. */
@@ -68,8 +69,8 @@
 /* Get a compile-time error if the heap_info padding is not correct
    to make alignment work as expected in sYSMALLOc.  */
 extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
-					     + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
-					    ? -1 : 1];
+                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
+                                            ? -1 : 1];
 
 /* Thread specific data */
 
@@ -80,9 +81,9 @@
 
 #if THREAD_STATS
 static int stat_n_heaps;
-#define THREAD_STAT(x) x
+# define THREAD_STAT(x) x
 #else
-#define THREAD_STAT(x) do ; while(0)
+# define THREAD_STAT(x) do ; while (0)
 #endif
 
 /* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
@@ -103,28 +104,28 @@
    in the new arena. */
 
 #define arena_get(ptr, size) do { \
-  arena_lookup(ptr); \
-  arena_lock(ptr, size); \
-} while(0)
+      arena_lookup (ptr);						      \
+      arena_lock (ptr, size);						      \
+  } while (0)
 
 #define arena_lookup(ptr) do { \
-  void *vptr = NULL; \
-  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
-} while(0)
-
-# define arena_lock(ptr, size) do { \
-  if(ptr) \
-    (void)mutex_lock(&ptr->mutex); \
-  else \
-    ptr = arena_get2(ptr, (size), NULL); \
-} while(0)
+      void *vptr = NULL;						      \
+      ptr = (mstate) tsd_getspecific (arena_key, vptr);			      \
+  } while (0)
+
+#define arena_lock(ptr, size) do {					      \
+      if (ptr)								      \
+        (void) mutex_lock (&ptr->mutex);				      \
+      else								      \
+        ptr = arena_get2 (ptr, (size), NULL);				      \
+  } while (0)
 
 /* find the heap and corresponding arena for a given ptr */
 
 #define heap_for_ptr(ptr) \
- ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
+  ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
 #define arena_for_chunk(ptr) \
- (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
+  (chunk_non_main_arena (ptr) ? heap_for_ptr (ptr)->ar_ptr : &main_arena)
 
 
 /**************************************************************************/
@@ -133,51 +134,58 @@
 
 /* atfork support.  */
 
-static void *(*save_malloc_hook) (size_t __size, const void *);
+static void *(*save_malloc_hook)(size_t __size, const void *);
 static void (*save_free_hook) (void *__ptr, const void *);
 static void *save_arena;
 
-#ifdef ATFORK_MEM
+# ifdef ATFORK_MEM
 ATFORK_MEM;
-#endif
+# endif
 
 /* Magic value for the thread-specific arena pointer when
    malloc_atfork() is in use.  */
 
-#define ATFORK_ARENA_PTR ((void*)-1)
+# define ATFORK_ARENA_PTR ((void *) -1)
 
 /* The following hooks are used while the `atfork' handling mechanism
    is active. */
 
-static void*
-malloc_atfork(size_t sz, const void *caller)
+static void *
+malloc_atfork (size_t sz, const void *caller)
 {
   void *vptr = NULL;
   void *victim;
 
-  tsd_getspecific(arena_key, vptr);
-  if(vptr == ATFORK_ARENA_PTR) {
-    /* We are the only thread that may allocate at all.  */
-    if(save_malloc_hook != malloc_check) {
-      return _int_malloc(&main_arena, sz);
-    } else {
-      if(top_check()<0)
-	return 0;
-      victim = _int_malloc(&main_arena, sz+1);
-      return mem2mem_check(victim, sz);
-    }
-  } else {
-    /* Suspend the thread until the `atfork' handlers have completed.
-       By that time, the hooks will have been reset as well, so that
-       mALLOc() can be used again. */
-    (void)mutex_lock(&list_lock);
-    (void)mutex_unlock(&list_lock);
-    return __libc_malloc(sz);
-  }
+  tsd_getspecific (arena_key, vptr);
+  if (vptr == ATFORK_ARENA_PTR)
+    {
+      /* We are the only thread that may allocate at all.  */
+      if (save_malloc_hook != malloc_check)
+        {
+          return _int_malloc (&main_arena, sz);
+        }
+      else
+        {
+          if (top_check () < 0)
+            return 0;
+
+          victim = _int_malloc (&main_arena, sz + 1);
+          return mem2mem_check (victim, sz);
+        }
+    }
+  else
+    {
+      /* Suspend the thread until the `atfork' handlers have completed.
+         By that time, the hooks will have been reset as well, so that
+         mALLOc() can be used again. */
+      (void) mutex_lock (&list_lock);
+      (void) mutex_unlock (&list_lock);
+      return __libc_malloc (sz);
+    }
 }
 
 static void
-free_atfork(void* mem, const void *caller)
+free_atfork (void *mem, const void *caller)
 {
   void *vptr = NULL;
   mstate ar_ptr;
@@ -186,17 +194,17 @@
   if (mem == 0)                              /* free(0) has no effect */
     return;
 
-  p = mem2chunk(mem);         /* do not bother to replicate free_check here */
-
-  if (chunk_is_mmapped(p))                       /* release mmapped memory. */
-  {
-    munmap_chunk(p);
-    return;
-  }
-
-  ar_ptr = arena_for_chunk(p);
-  tsd_getspecific(arena_key, vptr);
-  _int_free(ar_ptr, p, vptr == ATFORK_ARENA_PTR);
+  p = mem2chunk (mem);         /* do not bother to replicate free_check here */
+
+  if (chunk_is_mmapped (p))                       /* release mmapped memory. */
+    {
+      munmap_chunk (p);
+      return;
+    }
+
+  ar_ptr = arena_for_chunk (p);
+  tsd_getspecific (arena_key, vptr);
+  _int_free (ar_ptr, p, vptr == ATFORK_ARENA_PTR);
 }
 
 
@@ -214,33 +222,36 @@
 {
   mstate ar_ptr;
 
-  if(__malloc_initialized < 1)
+  if (__malloc_initialized < 1)
     return;
-  if (mutex_trylock(&list_lock))
+
+  if (mutex_trylock (&list_lock))
     {
       void *my_arena;
-      tsd_getspecific(arena_key, my_arena);
+      tsd_getspecific (arena_key, my_arena);
       if (my_arena == ATFORK_ARENA_PTR)
-	/* This is the same thread which already locks the global list.
-	   Just bump the counter.  */
-	goto out;
+        /* This is the same thread which already locks the global list.
+           Just bump the counter.  */
+        goto out;
 
       /* This thread has to wait its turn.  */
-      (void)mutex_lock(&list_lock);
-    }
-  for(ar_ptr = &main_arena;;) {
-    (void)mutex_lock(&ar_ptr->mutex);
-    ar_ptr = ar_ptr->next;
-    if(ar_ptr == &main_arena) break;
-  }
+      (void) mutex_lock (&list_lock);
+    }
+  for (ar_ptr = &main_arena;; )
+    {
+      (void) mutex_lock (&ar_ptr->mutex);
+      ar_ptr = ar_ptr->next;
+      if (ar_ptr == &main_arena)
+        break;
+    }
   save_malloc_hook = __malloc_hook;
   save_free_hook = __free_hook;
   __malloc_hook = malloc_atfork;
   __free_hook = free_atfork;
   /* Only the current thread may perform malloc/free calls now. */
-  tsd_getspecific(arena_key, save_arena);
-  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
- out:
+  tsd_getspecific (arena_key, save_arena);
+  tsd_setspecific (arena_key, ATFORK_ARENA_PTR);
+out:
   ++atfork_recursive_cntr;
 }
 
@@ -249,19 +260,23 @@
 {
   mstate ar_ptr;
 
-  if(__malloc_initialized < 1)
+  if (__malloc_initialized < 1)
     return;
+
   if (--atfork_recursive_cntr != 0)
     return;
-  tsd_setspecific(arena_key, save_arena);
+
+  tsd_setspecific (arena_key, save_arena);
   __malloc_hook = save_malloc_hook;
   __free_hook = save_free_hook;
-  for(ar_ptr = &main_arena;;) {
-    (void)mutex_unlock(&ar_ptr->mutex);
-    ar_ptr = ar_ptr->next;
-    if(ar_ptr == &main_arena) break;
-  }
-  (void)mutex_unlock(&list_lock);
+  for (ar_ptr = &main_arena;; )
+    {
+      (void) mutex_unlock (&ar_ptr->mutex);
+      ar_ptr = ar_ptr->next;
+      if (ar_ptr == &main_arena)
+        break;
+    }
+  (void) mutex_unlock (&list_lock);
 }
 
 # ifdef __linux__
@@ -276,31 +291,33 @@
 {
   mstate ar_ptr;
 
-  if(__malloc_initialized < 1)
+  if (__malloc_initialized < 1)
     return;
-  tsd_setspecific(arena_key, save_arena);
+
+  tsd_setspecific (arena_key, save_arena);
   __malloc_hook = save_malloc_hook;
   __free_hook = save_free_hook;
   free_list = NULL;
-  for(ar_ptr = &main_arena;;) {
-    mutex_init(&ar_ptr->mutex);
-    if (ar_ptr != save_arena) {
-      ar_ptr->next_free = free_list;
-      free_list = ar_ptr;
-    }
-    ar_ptr = ar_ptr->next;
-    if(ar_ptr == &main_arena) break;
-  }
-  mutex_init(&list_lock);
+  for (ar_ptr = &main_arena;; )
+    {
+      mutex_init (&ar_ptr->mutex);
+      if (ar_ptr != save_arena)
+        {
+          ar_ptr->next_free = free_list;
+          free_list = ar_ptr;
+        }
+      ar_ptr = ar_ptr->next;
+      if (ar_ptr == &main_arena)
+        break;
+    }
+  mutex_init (&list_lock);
   atfork_recursive_cntr = 0;
 }
 
 # else
 
 #  define ptmalloc_unlock_all2 ptmalloc_unlock_all
-
 # endif
-
 #endif  /* !NO_THREADS */
 
 /* Initialization routine. */
@@ -317,20 +334,20 @@
   while (*current != NULL)
     {
       if (__builtin_expect ((*current)[0] == 'M', 0)
-	  && (*current)[1] == 'A'
-	  && (*current)[2] == 'L'
-	  && (*current)[3] == 'L'
-	  && (*current)[4] == 'O'
-	  && (*current)[5] == 'C'
-	  && (*current)[6] == '_')
-	{
-	  result = &(*current)[7];
-
-	  /* Save current position for next visit.  */
-	  *position = ++current;
-
-	  break;
-	}
+          && (*current)[1] == 'A'
+          && (*current)[2] == 'L'
+          && (*current)[3] == 'L'
+          && (*current)[4] == 'O'
+          && (*current)[5] == 'C'
+          && (*current)[6] == '_')
+        {
+          result = &(*current)[7];
+
+          /* Save current position for next visit.  */
+          *position = ++current;
+
+          break;
+        }
 
       ++current;
     }
@@ -353,7 +370,9 @@
 static void
 ptmalloc_init (void)
 {
-  if(__malloc_initialized >= 0) return;
+  if (__malloc_initialized >= 0)
+    return;
+
   __malloc_initialized = 0;
 
 #ifdef SHARED
@@ -364,13 +383,13 @@
 
   if (_dl_open_hook != NULL
       || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
-	  && l->l_ns != LM_ID_BASE))
+          && l->l_ns != LM_ID_BASE))
     __morecore = __failing_morecore;
 #endif
 
-  tsd_key_create(&arena_key, NULL);
-  tsd_setspecific(arena_key, (void *)&main_arena);
-  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
+  tsd_key_create (&arena_key, NULL);
+  tsd_setspecific (arena_key, (void *) &main_arena);
+  thread_atfork (ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
   const char *s = NULL;
   if (__builtin_expect (_environ != NULL, 1))
     {
@@ -378,66 +397,67 @@
       char *envline;
 
       while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
-			       0))
-	{
-	  size_t len = strcspn (envline, "=");
-
-	  if (envline[len] != '=')
-	    /* This is a "MALLOC_" variable at the end of the string
-	       without a '=' character.  Ignore it since otherwise we
-	       will access invalid memory below.  */
-	    continue;
-
-	  switch (len)
-	    {
-	    case 6:
-	      if (memcmp (envline, "CHECK_", 6) == 0)
-		s = &envline[7];
-	      break;
-	    case 8:
-	      if (! __builtin_expect (__libc_enable_secure, 0))
-		{
-		  if (memcmp (envline, "TOP_PAD_", 8) == 0)
-		    __libc_mallopt(M_TOP_PAD, atoi(&envline[9]));
-		  else if (memcmp (envline, "PERTURB_", 8) == 0)
-		    __libc_mallopt(M_PERTURB, atoi(&envline[9]));
-		}
-	      break;
-	    case 9:
-	      if (! __builtin_expect (__libc_enable_secure, 0))
-		{
-		  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
-		    __libc_mallopt(M_MMAP_MAX, atoi(&envline[10]));
-		  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
-		    __libc_mallopt(M_ARENA_MAX, atoi(&envline[10]));
-		}
-	      break;
-	    case 10:
-	      if (! __builtin_expect (__libc_enable_secure, 0))
-		{
-		  if (memcmp (envline, "ARENA_TEST", 10) == 0)
-		    __libc_mallopt(M_ARENA_TEST, atoi(&envline[11]));
-		}
-	      break;
-	    case 15:
-	      if (! __builtin_expect (__libc_enable_secure, 0))
-		{
-		  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
-		    __libc_mallopt(M_TRIM_THRESHOLD, atoi(&envline[16]));
-		  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
-		    __libc_mallopt(M_MMAP_THRESHOLD, atoi(&envline[16]));
-		}
-	      break;
-	    default:
-	      break;
-	    }
-	}
-    }
-  if(s && s[0]) {
-    __libc_mallopt(M_CHECK_ACTION, (int)(s[0] - '0'));
-    if (check_action != 0)
-      __malloc_check_init();
-  }
+                               0))
+        {
+          size_t len = strcspn (envline, "=");
+
+          if (envline[len] != '=')
+            /* This is a "MALLOC_" variable at the end of the string
+               without a '=' character.  Ignore it since otherwise we
+               will access invalid memory below.  */
+            continue;
+
+          switch (len)
+            {
+            case 6:
+              if (memcmp (envline, "CHECK_", 6) == 0)
+                s = &envline[7];
+              break;
+            case 8:
+              if (!__builtin_expect (__libc_enable_secure, 0))
+                {
+                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
+                    __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
+                  else if (memcmp (envline, "PERTURB_", 8) == 0)
+                    __libc_mallopt (M_PERTURB, atoi (&envline[9]));
+                }
+              break;
+            case 9:
+              if (!__builtin_expect (__libc_enable_secure, 0))
+                {
+                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
+                    __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
+                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
+                    __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
+                }
+              break;
+            case 10:
+              if (!__builtin_expect (__libc_enable_secure, 0))
+                {
+                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
+                    __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
+                }
+              break;
+            case 15:
+              if (!__builtin_expect (__libc_enable_secure, 0))
+                {
+                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
+                    __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
+                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
+                    __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
+                }
+              break;
+            default:
+              break;
+            }
+        }
+    }
+  if (s && s[0])
+    {
+      __libc_mallopt (M_CHECK_ACTION, (int) (s[0] - '0'));
+      if (check_action != 0)
+        __malloc_check_init ();
+    }
   void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
   if (hook != NULL)
     (*hook)();
@@ -446,11 +466,11 @@
 
 /* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
 #ifdef thread_atfork_static
-thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
-		     ptmalloc_unlock_all2)
+thread_atfork_static (ptmalloc_lock_all, ptmalloc_unlock_all,		      \
+                      ptmalloc_unlock_all2)
 #endif
 
-
+
 
 /* Managing heaps and arenas (for concurrent threads) */
 
@@ -459,30 +479,33 @@
 /* Print the complete contents of a single heap to stderr. */
 
 static void
-dump_heap(heap_info *heap)
+dump_heap (heap_info *heap)
 {
   char *ptr;
   mchunkptr p;
 
-  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
-  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
-    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
-  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
-		  ~MALLOC_ALIGN_MASK);
-  for(;;) {
-    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
-    if(p == top(heap->ar_ptr)) {
-      fprintf(stderr, " (top)\n");
-      break;
-    } else if(p->size == (0|PREV_INUSE)) {
-      fprintf(stderr, " (fence)\n");
-      break;
-    }
-    fprintf(stderr, "\n");
-    p = next_chunk(p);
-  }
-}
-
+  fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
+  ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
+        (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
+  p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
+                   ~MALLOC_ALIGN_MASK);
+  for (;; )
+    {
+      fprintf (stderr, "chunk %p size %10lx", p, (long) p->size);
+      if (p == top (heap->ar_ptr))
+        {
+          fprintf (stderr, " (top)\n");
+          break;
+        }
+      else if (p->size == (0 | PREV_INUSE))
+        {
+          fprintf (stderr, " (fence)\n");
+          break;
+        }
+      fprintf (stderr, "\n");
+      p = next_chunk (p);
+    }
+}
 #endif /* MALLOC_DEBUG > 1 */
 
 /* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
@@ -500,18 +523,18 @@
 
 static heap_info *
 internal_function
-new_heap(size_t size, size_t top_pad)
-{
-  size_t page_mask = GLRO(dl_pagesize) - 1;
+new_heap (size_t size, size_t top_pad)
+{
+  size_t page_mask = GLRO (dl_pagesize) - 1;
   char *p1, *p2;
   unsigned long ul;
   heap_info *h;
 
-  if(size+top_pad < HEAP_MIN_SIZE)
+  if (size + top_pad < HEAP_MIN_SIZE)
     size = HEAP_MIN_SIZE;
-  else if(size+top_pad <= HEAP_MAX_SIZE)
+  else if (size + top_pad <= HEAP_MAX_SIZE)
     size += top_pad;
-  else if(size > HEAP_MAX_SIZE)
+  else if (size > HEAP_MAX_SIZE)
     return 0;
   else
     size = HEAP_MAX_SIZE;
@@ -522,46 +545,55 @@
      mapping (on Linux, this is the case for all non-writable mappings
      anyway). */
   p2 = MAP_FAILED;
-  if(aligned_heap_area) {
-    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
-		      MAP_NORESERVE);
-    aligned_heap_area = NULL;
-    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
-      __munmap(p2, HEAP_MAX_SIZE);
-      p2 = MAP_FAILED;
-    }
-  }
-  if(p2 == MAP_FAILED) {
-    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_NORESERVE);
-    if(p1 != MAP_FAILED) {
-      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
-		    & ~(HEAP_MAX_SIZE-1));
-      ul = p2 - p1;
-      if (ul)
-	__munmap(p1, ul);
+  if (aligned_heap_area)
+    {
+      p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
+                          MAP_NORESERVE);
+      aligned_heap_area = NULL;
+      if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
+        {
+          __munmap (p2, HEAP_MAX_SIZE);
+          p2 = MAP_FAILED;
+        }
+    }
+  if (p2 == MAP_FAILED)
+    {
+      p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
+      if (p1 != MAP_FAILED)
+        {
+          p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
+                         & ~(HEAP_MAX_SIZE - 1));
+          ul = p2 - p1;
+          if (ul)
+            __munmap (p1, ul);
+          else
+            aligned_heap_area = p2 + HEAP_MAX_SIZE;
+          __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
+        }
       else
-	aligned_heap_area = p2 + HEAP_MAX_SIZE;
-      __munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
-    } else {
-      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
-	 is already aligned. */
-      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
-      if(p2 == MAP_FAILED)
-	return 0;
-      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
-	__munmap(p2, HEAP_MAX_SIZE);
-	return 0;
-      }
-    }
-  }
-  if(__mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
-    __munmap(p2, HEAP_MAX_SIZE);
-    return 0;
-  }
-  h = (heap_info *)p2;
+        {
+          /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
+             is already aligned. */
+          p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
+          if (p2 == MAP_FAILED)
+            return 0;
+
+          if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
+            {
+              __munmap (p2, HEAP_MAX_SIZE);
+              return 0;
+            }
+        }
+    }
+  if (__mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
+    {
+      __munmap (p2, HEAP_MAX_SIZE);
+      return 0;
+    }
+  h = (heap_info *) p2;
   h->size = size;
   h->mprotect_size = size;
-  THREAD_STAT(stat_n_heaps++);
+  THREAD_STAT (stat_n_heaps++);
   LIBC_PROBE (memory_heap_new, 2, h, h->size);
   return h;
 }
@@ -570,22 +602,25 @@
    multiple of the page size. */
 
 static int
-grow_heap(heap_info *h, long diff)
-{
-  size_t page_mask = GLRO(dl_pagesize) - 1;
+grow_heap (heap_info *h, long diff)
+{
+  size_t page_mask = GLRO (dl_pagesize) - 1;
   long new_size;
 
   diff = (diff + page_mask) & ~page_mask;
-  new_size = (long)h->size + diff;
-  if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
+  new_size = (long) h->size + diff;
+  if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
     return -1;
-  if((unsigned long) new_size > h->mprotect_size) {
-    if (__mprotect((char *)h + h->mprotect_size,
-		   (unsigned long) new_size - h->mprotect_size,
-		   PROT_READ|PROT_WRITE) != 0)
-      return -2;
-    h->mprotect_size = new_size;
-  }
+
+  if ((unsigned long) new_size > h->mprotect_size)
+    {
+      if (__mprotect ((char *) h + h->mprotect_size,
+                      (unsigned long) new_size - h->mprotect_size,
+                      PROT_READ | PROT_WRITE) != 0)
+        return -2;
+
+      h->mprotect_size = new_size;
+    }
 
   h->size = new_size;
   LIBC_PROBE (memory_heap_more, 2, h, h->size);
@@ -595,24 +630,26 @@
 /* Shrink a heap.  */
 
 static int
-shrink_heap(heap_info *h, long diff)
+shrink_heap (heap_info *h, long diff)
 {
   long new_size;
 
-  new_size = (long)h->size - diff;
-  if(new_size < (long)sizeof(*h))
+  new_size = (long) h->size - diff;
+  if (new_size < (long) sizeof (*h))
     return -1;
+
   /* Try to re-map the extra heap space freshly to save memory, and make it
      inaccessible.  See malloc-sysdep.h to know when this is true.  */
   if (__builtin_expect (check_may_shrink_heap (), 0))
     {
-      if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
-		      MAP_FIXED) == (char *) MAP_FAILED)
-	return -2;
+      if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
+                         MAP_FIXED) == (char *) MAP_FAILED)
+        return -2;
+
       h->mprotect_size = new_size;
     }
   else
-    __madvise ((char *)h + new_size, diff, MADV_DONTNEED);
+    __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
   /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
 
   h->size = new_size;
@@ -623,66 +660,70 @@
 /* Delete a heap. */
 
 #define delete_heap(heap) \
-  do {								\
-    if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area)	\
-      aligned_heap_area = NULL;					\
-    __munmap((char*)(heap), HEAP_MAX_SIZE);			\
-  } while (0)
+  do {									      \
+      if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area)		      \
+        aligned_heap_area = NULL;					      \
+      __munmap ((char *) (heap), HEAP_MAX_SIZE);			      \
+    } while (0)
 
 static int
 internal_function
-heap_trim(heap_info *heap, size_t pad)
+heap_trim (heap_info *heap, size_t pad)
 {
   mstate ar_ptr = heap->ar_ptr;
-  unsigned long pagesz = GLRO(dl_pagesize);
-  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
+  unsigned long pagesz = GLRO (dl_pagesize);
+  mchunkptr top_chunk = top (ar_ptr), p, bck, fwd;
   heap_info *prev_heap;
   long new_size, top_size, extra, prev_size, misalign;
 
   /* Can this heap go away completely? */
-  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
-    prev_heap = heap->prev;
-    prev_size = prev_heap->size - (MINSIZE-2*SIZE_SZ);
-    p = chunk_at_offset(prev_heap, prev_size);
-    /* fencepost must be properly aligned.  */
-    misalign = ((long) p) & MALLOC_ALIGN_MASK;
-    p = chunk_at_offset(prev_heap, prev_size - misalign);
-    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
-    p = prev_chunk(p);
-    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ) + misalign;
-    assert(new_size>0 && new_size<(long)(2*MINSIZE));
-    if(!prev_inuse(p))
-      new_size += p->prev_size;
-    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
-    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
-      break;
-    ar_ptr->system_mem -= heap->size;
-    arena_mem -= heap->size;
-    LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
-    delete_heap(heap);
-    heap = prev_heap;
-    if(!prev_inuse(p)) { /* consolidate backward */
-      p = prev_chunk(p);
-      unlink(p, bck, fwd);
-    }
-    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
-    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
-    top(ar_ptr) = top_chunk = p;
-    set_head(top_chunk, new_size | PREV_INUSE);
-    /*check_chunk(ar_ptr, top_chunk);*/
-  }
-  top_size = chunksize(top_chunk);
+  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
+    {
+      prev_heap = heap->prev;
+      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
+      p = chunk_at_offset (prev_heap, prev_size);
+      /* fencepost must be properly aligned.  */
+      misalign = ((long) p) & MALLOC_ALIGN_MASK;
+      p = chunk_at_offset (prev_heap, prev_size - misalign);
+      assert (p->size == (0 | PREV_INUSE)); /* must be fencepost */
+      p = prev_chunk (p);
+      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
+      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
+      if (!prev_inuse (p))
+        new_size += p->prev_size;
+      assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
+      if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
+        break;
+      ar_ptr->system_mem -= heap->size;
+      arena_mem -= heap->size;
+      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
+      delete_heap (heap);
+      heap = prev_heap;
+      if (!prev_inuse (p)) /* consolidate backward */
+        {
+          p = prev_chunk (p);
+          unlink (p, bck, fwd);
+        }
+      assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
+      assert (((char *) p + new_size) == ((char *) heap + heap->size));
+      top (ar_ptr) = top_chunk = p;
+      set_head (top_chunk, new_size | PREV_INUSE);
+      /*check_chunk(ar_ptr, top_chunk);*/
+    }
+  top_size = chunksize (top_chunk);
   extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
-  if(extra < (long)pagesz)
+  if (extra < (long) pagesz)
     return 0;
+
   /* Try to shrink. */
-  if(shrink_heap(heap, extra) != 0)
+  if (shrink_heap (heap, extra) != 0)
     return 0;
+
   ar_ptr->system_mem -= extra;
   arena_mem -= extra;
 
   /* Success. Adjust top accordingly. */
-  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
+  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
   /*check_chunk(ar_ptr, top_chunk);*/
   return 1;
 }
@@ -690,52 +731,53 @@
 /* Create a new arena with initial size "size".  */
 
 static mstate
-_int_new_arena(size_t size)
+_int_new_arena (size_t size)
 {
   mstate a;
   heap_info *h;
   char *ptr;
   unsigned long misalign;
 
-  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
-	       mp_.top_pad);
-  if(!h) {
-    /* Maybe size is too large to fit in a single heap.  So, just try
-       to create a minimally-sized arena and let _int_malloc() attempt
-       to deal with the large request via mmap_chunk().  */
-    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
-    if(!h)
-      return 0;
-  }
-  a = h->ar_ptr = (mstate)(h+1);
-  malloc_init_state(a);
+  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
+                mp_.top_pad);
+  if (!h)
+    {
+      /* Maybe size is too large to fit in a single heap.  So, just try
+         to create a minimally-sized arena and let _int_malloc() attempt
+         to deal with the large request via mmap_chunk().  */
+      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
+      if (!h)
+        return 0;
+    }
+  a = h->ar_ptr = (mstate) (h + 1);
+  malloc_init_state (a);
   /*a->next = NULL;*/
   a->system_mem = a->max_system_mem = h->size;
   arena_mem += h->size;
 
   /* Set up the top chunk, with proper alignment. */
-  ptr = (char *)(a + 1);
-  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
+  ptr = (char *) (a + 1);
+  misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
   if (misalign > 0)
     ptr += MALLOC_ALIGNMENT - misalign;
-  top(a) = (mchunkptr)ptr;
-  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);
+  top (a) = (mchunkptr) ptr;
+  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);
 
   LIBC_PROBE (memory_arena_new, 2, a, size);
-  tsd_setspecific(arena_key, (void *)a);
-  mutex_init(&a->mutex);
-  (void)mutex_lock(&a->mutex);
-
-  (void)mutex_lock(&list_lock);
+  tsd_setspecific (arena_key, (void *) a);
+  mutex_init (&a->mutex);
+  (void) mutex_lock (&a->mutex);
+
+  (void) mutex_lock (&list_lock);
 
   /* Add the new arena to the global list.  */
   a->next = main_arena.next;
   atomic_write_barrier ();
   main_arena.next = a;
 
-  (void)mutex_unlock(&list_lock);
-
-  THREAD_STAT(++(a->stat_lock_loop));
+  (void) mutex_unlock (&list_lock);
+
+  THREAD_STAT (++(a->stat_lock_loop));
 
   return a;
 }
@@ -747,19 +789,19 @@
   mstate result = free_list;
   if (result != NULL)
     {
-      (void)mutex_lock(&list_lock);
+      (void) mutex_lock (&list_lock);
       result = free_list;
       if (result != NULL)
-	free_list = result->next_free;
-      (void)mutex_unlock(&list_lock);
+        free_list = result->next_free;
+      (void) mutex_unlock (&list_lock);
 
       if (result != NULL)
-	{
-	  LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
-	  (void)mutex_lock(&result->mutex);
-	  tsd_setspecific(arena_key, (void *)result);
-	  THREAD_STAT(++(result->stat_lock_loop));
-	}
+        {
+          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
+          (void) mutex_lock (&result->mutex);
+          tsd_setspecific (arena_key, (void *) result);
+          THREAD_STAT (++(result->stat_lock_loop));
+        }
     }
 
   return result;
@@ -779,8 +821,8 @@
   result = next_to_use;
   do
     {
-      if (!mutex_trylock(&result->mutex))
-	goto out;
+      if (!mutex_trylock (&result->mutex))
+        goto out;
 
       result = result->next;
     }
@@ -793,12 +835,12 @@
 
   /* No arena available.  Wait for the next in line.  */
   LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
-  (void)mutex_lock(&result->mutex);
-
- out:
+  (void) mutex_lock (&result->mutex);
+
+out:
   LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
-  tsd_setspecific(arena_key, (void *)result);
-  THREAD_STAT(++(result->stat_lock_loop));
+  tsd_setspecific (arena_key, (void *) result);
+  THREAD_STAT (++(result->stat_lock_loop));
   next_to_use = result->next;
 
   return result;
@@ -806,7 +848,7 @@
 
 static mstate
 internal_function
-arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
+arena_get2 (mstate a_tsd, size_t size, mstate avoid_arena)
 {
   mstate a;
 
@@ -817,40 +859,40 @@
     {
       /* Nothing immediately available, so generate a new arena.  */
       if (narenas_limit == 0)
-	{
-	  if (mp_.arena_max != 0)
-	    narenas_limit = mp_.arena_max;
-	  else if (narenas > mp_.arena_test)
-	    {
-	      int n  = __get_nprocs ();
-
-	      if (n >= 1)
-		narenas_limit = NARENAS_FROM_NCORES (n);
-	      else
-		/* We have no information about the system.  Assume two
-		   cores.  */
-		narenas_limit = NARENAS_FROM_NCORES (2);
-	    }
-	}
+        {
+          if (mp_.arena_max != 0)
+            narenas_limit = mp_.arena_max;
+          else if (narenas > mp_.arena_test)
+            {
+              int n = __get_nprocs ();
+
+              if (n >= 1)
+                narenas_limit = NARENAS_FROM_NCORES (n);
+              else
+                /* We have no information about the system.  Assume two
+                   cores.  */
+                narenas_limit = NARENAS_FROM_NCORES (2);
+            }
+        }
     repeat:;
       size_t n = narenas;
       /* NB: the following depends on the fact that (size_t)0 - 1 is a
-	 very large number and that the underflow is OK.  If arena_max
-	 is set the value of arena_test is irrelevant.  If arena_test
-	 is set but narenas is not yet larger or equal to arena_test
-	 narenas_limit is 0.  There is no possibility for narenas to
-	 be too big for the test to always fail since there is not
-	 enough address space to create that many arenas.  */
+         very large number and that the underflow is OK.  If arena_max
+         is set the value of arena_test is irrelevant.  If arena_test
+         is set but narenas is not yet larger or equal to arena_test
+         narenas_limit is 0.  There is no possibility for narenas to
+         be too big for the test to always fail since there is not
+         enough address space to create that many arenas.  */
       if (__builtin_expect (n <= narenas_limit - 1, 0))
-	{
-	  if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
-	    goto repeat;
-	  a = _int_new_arena (size);
-	  if (__builtin_expect (a == NULL, 0))
-	    catomic_decrement (&narenas);
-	}
+        {
+          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
+            goto repeat;
+          a = _int_new_arena (size);
+          if (__builtin_expect (a == NULL, 0))
+            catomic_decrement (&narenas);
+        }
       else
-	a = reused_arena (avoid_arena);
+        a = reused_arena (avoid_arena);
     }
   return a;
 }
@@ -863,16 +905,19 @@
 arena_get_retry (mstate ar_ptr, size_t bytes)
 {
   LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
-  if(ar_ptr != &main_arena) {
-    (void)mutex_unlock(&ar_ptr->mutex);
-    ar_ptr = &main_arena;
-    (void)mutex_lock(&ar_ptr->mutex);
-  } else {
-    /* Grab ar_ptr->next prior to releasing its lock.  */
-    mstate prev = ar_ptr->next ? ar_ptr : 0;
-    (void)mutex_unlock(&ar_ptr->mutex);
-    ar_ptr = arena_get2(prev, bytes, ar_ptr);
-  }
+  if (ar_ptr != &main_arena)
+    {
+      (void) mutex_unlock (&ar_ptr->mutex);
+      ar_ptr = &main_arena;
+      (void) mutex_lock (&ar_ptr->mutex);
+    }
+  else
+    {
+      /* Grab ar_ptr->next prior to releasing its lock.  */
+      mstate prev = ar_ptr->next ? ar_ptr : 0;
+      (void) mutex_unlock (&ar_ptr->mutex);
+      ar_ptr = arena_get2 (prev, bytes, ar_ptr);
+    }
 
   return ar_ptr;
 }
@@ -881,15 +926,15 @@
 arena_thread_freeres (void)
 {
   void *vptr = NULL;
-  mstate a = tsd_getspecific(arena_key, vptr);
-  tsd_setspecific(arena_key, NULL);
+  mstate a = tsd_getspecific (arena_key, vptr);
+  tsd_setspecific (arena_key, NULL);
 
   if (a != NULL)
     {
-      (void)mutex_lock(&list_lock);
+      (void) mutex_lock (&list_lock);
       a->next_free = free_list;
       free_list = a;
-      (void)mutex_unlock(&list_lock);
+      (void) mutex_unlock (&list_lock);
     }
 }
 text_set_element (__libc_thread_subfreeres, arena_thread_freeres);

Modified: fsf/trunk/libc/malloc/hooks.c
==============================================================================
--- fsf/trunk/libc/malloc/hooks.c (original)
+++ fsf/trunk/libc/malloc/hooks.c Fri Jan  3 00:01:54 2014
@@ -24,29 +24,29 @@
 /* Hooks for debugging versions.  The initial hooks just call the
    initialization routine, then do the normal work. */
 
-static void*
-malloc_hook_ini(size_t sz, const void *caller)
+static void *
+malloc_hook_ini (size_t sz, const void *caller)
 {
   __malloc_hook = NULL;
-  ptmalloc_init();
-  return __libc_malloc(sz);
-}
-
-static void*
-realloc_hook_ini(void* ptr, size_t sz, const void *caller)
+  ptmalloc_init ();
+  return __libc_malloc (sz);
+}
+
+static void *
+realloc_hook_ini (void *ptr, size_t sz, const void *caller)
 {
   __malloc_hook = NULL;
   __realloc_hook = NULL;
-  ptmalloc_init();
-  return __libc_realloc(ptr, sz);
-}
-
-static void*
-memalign_hook_ini(size_t alignment, size_t sz, const void *caller)
+  ptmalloc_init ();
+  return __libc_realloc (ptr, sz);
+}
+
+static void *
+memalign_hook_ini (size_t alignment, size_t sz, const void *caller)
 {
   __memalign_hook = NULL;
-  ptmalloc_init();
-  return __libc_memalign(alignment, sz);
+  ptmalloc_init ();
+  return __libc_memalign (alignment, sz);
 }
 
 /* Whether we are using malloc checking.  */
@@ -71,10 +71,11 @@
 void
 __malloc_check_init (void)
 {
-  if (disallow_malloc_check) {
-    disallow_malloc_check = 0;
-    return;
-  }
+  if (disallow_malloc_check)
+    {
+      disallow_malloc_check = 0;
+      return;
+    }
   using_malloc_checking = 1;
   __malloc_hook = malloc_check;
   __free_hook = free_check;
@@ -87,7 +88,7 @@
    overruns.  The goal here is to avoid obscure crashes due to invalid
    usage, unlike in the MALLOC_DEBUG code. */
 
-#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
+#define MAGICBYTE(p) ((((size_t) p >> 3) ^ ((size_t) p >> 11)) & 0xFF)
 
 /* Visualize the chunk as being partitioned into blocks of 256 bytes from the
    highest address of the chunk, downwards.  The beginning of each block tells
@@ -96,53 +97,58 @@
    must reach it with this iteration, otherwise we have witnessed a memory
    corruption.  */
 static size_t
-malloc_check_get_size(mchunkptr p)
+malloc_check_get_size (mchunkptr p)
 {
   size_t size;
   unsigned char c;
-  unsigned char magic = MAGICBYTE(p);
-
-  assert(using_malloc_checking == 1);
-
-  for (size = chunksize(p) - 1 + (chunk_is_mmapped(p) ? 0 : SIZE_SZ);
-       (c = ((unsigned char*)p)[size]) != magic;
-       size -= c) {
-    if(c<=0 || size<(c+2*SIZE_SZ)) {
-      malloc_printerr(check_action, "malloc_check_get_size: memory corruption",
-		      chunk2mem(p));
-      return 0;
-    }
-  }
+  unsigned char magic = MAGICBYTE (p);
+
+  assert (using_malloc_checking == 1);
+
+  for (size = chunksize (p) - 1 + (chunk_is_mmapped (p) ? 0 : SIZE_SZ);
+       (c = ((unsigned char *) p)[size]) != magic;
+       size -= c)
+    {
+      if (c <= 0 || size < (c + 2 * SIZE_SZ))
+        {
+          malloc_printerr (check_action, "malloc_check_get_size: memory corruption",
+                           chunk2mem (p));
+          return 0;
+        }
+    }
 
   /* chunk2mem size.  */
-  return size - 2*SIZE_SZ;
+  return size - 2 * SIZE_SZ;
 }
 
 /* Instrument a chunk with overrun detector byte(s) and convert it
    into a user pointer with requested size sz. */
 
-static void*
+static void *
 internal_function
-mem2mem_check(void *ptr, size_t sz)
+mem2mem_check (void *ptr, size_t sz)
 {
   mchunkptr p;
-  unsigned char* m_ptr = ptr;
+  unsigned char *m_ptr = ptr;
   size_t i;
 
   if (!ptr)
     return ptr;
-  p = mem2chunk(ptr);
-  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
-      i > sz;
-      i -= 0xFF) {
-    if(i-sz < 0x100) {
-      m_ptr[i] = (unsigned char)(i-sz);
-      break;
-    }
-    m_ptr[i] = 0xFF;
-  }
-  m_ptr[sz] = MAGICBYTE(p);
-  return (void*)m_ptr;
+
+  p = mem2chunk (ptr);
+  for (i = chunksize (p) - (chunk_is_mmapped (p) ? 2 * SIZE_SZ + 1 : SIZE_SZ + 1);
+       i > sz;
+       i -= 0xFF)
+    {
+      if (i - sz < 0x100)
+        {
+          m_ptr[i] = (unsigned char) (i - sz);
+          break;
+        }
+      m_ptr[i] = 0xFF;
+    }
+  m_ptr[sz] = MAGICBYTE (p);
+  return (void *) m_ptr;
 }
 
 /* Convert a pointer to be free()d or realloc()ed to a valid chunk
@@ -150,53 +156,64 @@
 
 static mchunkptr
 internal_function
-mem2chunk_check(void* mem, unsigned char **magic_p)
+mem2chunk_check (void *mem, unsigned char **magic_p)
 {
   mchunkptr p;
   INTERNAL_SIZE_T sz, c;
   unsigned char magic;
 
-  if(!aligned_OK(mem)) return NULL;
-  p = mem2chunk(mem);
-  if (!chunk_is_mmapped(p)) {
-    /* Must be a chunk in conventional heap memory. */
-    int contig = contiguous(&main_arena);
-    sz = chunksize(p);
-    if((contig &&
-	((char*)p<mp_.sbrk_base ||
-	 ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
-       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
-       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
-			    (contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
-			    next_chunk(prev_chunk(p))!=p) ))
-      return NULL;
-    magic = MAGICBYTE(p);
-    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
-      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
-    }
-  } else {
-    unsigned long offset, page_mask = GLRO(dl_pagesize)-1;
-
-    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
-       alignment relative to the beginning of a page.  Check this
-       first. */
-    offset = (unsigned long)mem & page_mask;
-    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
-	offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
-	offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
-	offset<0x2000) ||
-       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
-       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
-       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
-      return NULL;
-    magic = MAGICBYTE(p);
-    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
-      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
-    }
-  }
-  ((unsigned char*)p)[sz] ^= 0xFF;
+  if (!aligned_OK (mem))
+    return NULL;
+
+  p = mem2chunk (mem);
+  if (!chunk_is_mmapped (p))
+    {
+      /* Must be a chunk in conventional heap memory. */
+      int contig = contiguous (&main_arena);
+      sz = chunksize (p);
+      if ((contig &&
+           ((char *) p < mp_.sbrk_base ||
+            ((char *) p + sz) >= (mp_.sbrk_base + main_arena.system_mem))) ||
+          sz < MINSIZE || sz & MALLOC_ALIGN_MASK || !inuse (p) ||
+          (!prev_inuse (p) && (p->prev_size & MALLOC_ALIGN_MASK ||
+                               (contig && (char *) prev_chunk (p) < mp_.sbrk_base) ||
+                               next_chunk (prev_chunk (p)) != p)))
+        return NULL;
+
+      magic = MAGICBYTE (p);
+      for (sz += SIZE_SZ - 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
+        {
+          if (c <= 0 || sz < (c + 2 * SIZE_SZ))
+            return NULL;
+        }
+    }
+  else
+    {
+      unsigned long offset, page_mask = GLRO (dl_pagesize) - 1;
+
+      /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
+         alignment relative to the beginning of a page.  Check this
+         first. */
+      offset = (unsigned long) mem & page_mask;
+      if ((offset != MALLOC_ALIGNMENT && offset != 0 && offset != 0x10 &&
+           offset != 0x20 && offset != 0x40 && offset != 0x80 && offset != 0x100 &&
+           offset != 0x200 && offset != 0x400 && offset != 0x800 && offset != 0x1000 &&
+           offset < 0x2000) ||
+          !chunk_is_mmapped (p) || (p->size & PREV_INUSE) ||
+          ((((unsigned long) p - p->prev_size) & page_mask) != 0) ||
+          ((sz = chunksize (p)), ((p->prev_size + sz) & page_mask) != 0))
+        return NULL;
+
+      magic = MAGICBYTE (p);
+      for (sz -= 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
+        {
+          if (c <= 0 || sz < (c + 2 * SIZE_SZ))
+            return NULL;
+        }
+    }
+  ((unsigned char *) p)[sz] ^= 0xFF;
   if (magic_p)
-    *magic_p = (unsigned char *)p + sz;
+    *magic_p = (unsigned char *) p + sz;
   return p;
 }
 
@@ -205,32 +222,32 @@
 
 static int
 internal_function
-top_check(void)
-{
-  mchunkptr t = top(&main_arena);
-  char* brk, * new_brk;
+top_check (void)
+{
+  mchunkptr t = top (&main_arena);
+  char *brk, *new_brk;
   INTERNAL_SIZE_T front_misalign, sbrk_size;
-  unsigned long pagesz = GLRO(dl_pagesize);
-
-  if (t == initial_top(&main_arena) ||
-      (!chunk_is_mmapped(t) &&
-       chunksize(t)>=MINSIZE &&
-       prev_inuse(t) &&
-       (!contiguous(&main_arena) ||
-	(char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem)))
+  unsigned long pagesz = GLRO (dl_pagesize);
+
+  if (t == initial_top (&main_arena) ||
+      (!chunk_is_mmapped (t) &&
+       chunksize (t) >= MINSIZE &&
+       prev_inuse (t) &&
+       (!contiguous (&main_arena) ||
+        (char *) t + chunksize (t) == mp_.sbrk_base + main_arena.system_mem)))
     return 0;
 
   malloc_printerr (check_action, "malloc: top chunk is corrupt", t);
 
   /* Try to set up a new top chunk. */
-  brk = MORECORE(0);
-  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
+  brk = MORECORE (0);
+  front_misalign = (unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK;
   if (front_misalign > 0)
     front_misalign = MALLOC_ALIGNMENT - front_misalign;
   sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
-  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
-  new_brk = (char*)(MORECORE (sbrk_size));
-  if (new_brk == (char*)(MORECORE_FAILURE))
+  sbrk_size += pagesz - ((unsigned long) (brk + sbrk_size) & (pagesz - 1));
+  new_brk = (char *) (MORECORE (sbrk_size));
+  if (new_brk == (char *) (MORECORE_FAILURE))
     {
       __set_errno (ENOMEM);
       return -1;
@@ -238,128 +255,148 @@
   /* Call the `morecore' hook if necessary.  */
   void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
   if (hook)
-    (*hook) ();
+    (*hook)();
   main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;
 
-  top(&main_arena) = (mchunkptr)(brk + front_misalign);
-  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);
+  top (&main_arena) = (mchunkptr) (brk + front_misalign);
+  set_head (top (&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);
 
   return 0;
 }
 
-static void*
-malloc_check(size_t sz, const void *caller)
+static void *
+malloc_check (size_t sz, const void *caller)
 {
   void *victim;
 
-  if (sz+1 == 0) {
-    __set_errno (ENOMEM);
-    return NULL;
-  }
-
-  (void)mutex_lock(&main_arena.mutex);
-  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
-  (void)mutex_unlock(&main_arena.mutex);
-  return mem2mem_check(victim, sz);
+  if (sz + 1 == 0)
+    {
+      __set_errno (ENOMEM);
+      return NULL;
+    }
+
+  (void) mutex_lock (&main_arena.mutex);
+  victim = (top_check () >= 0) ? _int_malloc (&main_arena, sz + 1) : NULL;
+  (void) mutex_unlock (&main_arena.mutex);
+  return mem2mem_check (victim, sz);
 }
 
 static void
-free_check(void* mem, const void *caller)
+free_check (void *mem, const void *caller)
 {
   mchunkptr p;
 
-  if(!mem) return;
-  (void)mutex_lock(&main_arena.mutex);
-  p = mem2chunk_check(mem, NULL);
-  if(!p) {
-    (void)mutex_unlock(&main_arena.mutex);
-
-    malloc_printerr(check_action, "free(): invalid pointer", mem);
+  if (!mem)
     return;
-  }
-  if (chunk_is_mmapped(p)) {
-    (void)mutex_unlock(&main_arena.mutex);
-    munmap_chunk(p);
-    return;
-  }
-  _int_free(&main_arena, p, 1);
-  (void)mutex_unlock(&main_arena.mutex);
-}
-
-static void*
-realloc_check(void* oldmem, size_t bytes, const void *caller)
+
+  (void) mutex_lock (&main_arena.mutex);
+  p = mem2chunk_check (mem, NULL);
+  if (!p)
+    {
+      (void) mutex_unlock (&main_arena.mutex);
+
+      malloc_printerr (check_action, "free(): invalid pointer", mem);
+      return;
+    }
+  if (chunk_is_mmapped (p))
+    {
+      (void) mutex_unlock (&main_arena.mutex);
+      munmap_chunk (p);
+      return;
+    }
+  _int_free (&main_arena, p, 1);
+  (void) mutex_unlock (&main_arena.mutex);
+}
+
+static void *
+realloc_check (void *oldmem, size_t bytes, const void *caller)
 {
   INTERNAL_SIZE_T nb;
-  void* newmem = 0;
+  void *newmem = 0;
   unsigned char *magic_p;
 
-  if (bytes+1 == 0) {
-    __set_errno (ENOMEM);
-    return NULL;
-  }
-  if (oldmem == 0) return malloc_check(bytes, NULL);
-  if (bytes == 0) {
-    free_check (oldmem, NULL);
-    return NULL;
-  }
-  (void)mutex_lock(&main_arena.mutex);
-  const mchunkptr oldp = mem2chunk_check(oldmem, &magic_p);
-  (void)mutex_unlock(&main_arena.mutex);
-  if(!oldp) {
-    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
-    return malloc_check(bytes, NULL);
-  }
-  const INTERNAL_SIZE_T oldsize = chunksize(oldp);
-
-  checked_request2size(bytes+1, nb);
-  (void)mutex_lock(&main_arena.mutex);
-
-  if (chunk_is_mmapped(oldp)) {
+  if (bytes + 1 == 0)
+    {
+      __set_errno (ENOMEM);
+      return NULL;
+    }
+  if (oldmem == 0)
+    return malloc_check (bytes, NULL);
+
+  if (bytes == 0)
+    {
+      free_check (oldmem, NULL);
+      return NULL;
+    }
+  (void) mutex_lock (&main_arena.mutex);
+  const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
+  (void) mutex_unlock (&main_arena.mutex);
+  if (!oldp)
+    {
+      malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
+      return malloc_check (bytes, NULL);
+    }
+  const INTERNAL_SIZE_T oldsize = chunksize (oldp);
+
+  checked_request2size (bytes + 1, nb);
+  (void) mutex_lock (&main_arena.mutex);
+
+  if (chunk_is_mmapped (oldp))
+    {
 #if HAVE_MREMAP
-    mchunkptr newp = mremap_chunk(oldp, nb);
-    if(newp)
-      newmem = chunk2mem(newp);
-    else
+      mchunkptr newp = mremap_chunk (oldp, nb);
+      if (newp)
+        newmem = chunk2mem (newp);
+      else
 #endif
-    {
-      /* Note the extra SIZE_SZ overhead. */
-      if(oldsize - SIZE_SZ >= nb)
-	newmem = oldmem; /* do nothing */
-      else {
-	/* Must alloc, copy, free. */
-	if (top_check() >= 0)
-	  newmem = _int_malloc(&main_arena, bytes+1);
-	if (newmem) {
-	  memcpy(newmem, oldmem, oldsize - 2*SIZE_SZ);
-	  munmap_chunk(oldp);
-	}
+      {
+        /* Note the extra SIZE_SZ overhead. */
+        if (oldsize - SIZE_SZ >= nb)
+          newmem = oldmem; /* do nothing */
+        else
+          {
+            /* Must alloc, copy, free. */
+            if (top_check () >= 0)
+              newmem = _int_malloc (&main_arena, bytes + 1);
+            if (newmem)
+              {
+                memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
+                munmap_chunk (oldp);
+              }
+          }
       }
     }
-  } else {
-    if (top_check() >= 0) {
-      INTERNAL_SIZE_T nb;
-      checked_request2size(bytes + 1, nb);
-      newmem = _int_realloc(&main_arena, oldp, oldsize, nb);
-    }
-  }
+  else
+    {
+      if (top_check () >= 0)
+        {
+          INTERNAL_SIZE_T nb;
+          checked_request2size (bytes + 1, nb);
+          newmem = _int_realloc (&main_arena, oldp, oldsize, nb);
+        }
+    }
 
   /* mem2chunk_check changed the magic byte in the old chunk.
      If newmem is NULL, then the old chunk will still be used though,
      so we need to invert that change here.  */
-  if (newmem == NULL) *magic_p ^= 0xFF;
-
-  (void)mutex_unlock(&main_arena.mutex);
-
-  return mem2mem_check(newmem, bytes);
-}
-
-static void*
-memalign_check(size_t alignment, size_t bytes, const void *caller)
-{
-  void* mem;
-
-  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
-  if (alignment <  MINSIZE) alignment = MINSIZE;
+  if (newmem == NULL)
+    *magic_p ^= 0xFF;
+
+  (void) mutex_unlock (&main_arena.mutex);
+
+  return mem2mem_check (newmem, bytes);
+}
+
+static void *
+memalign_check (size_t alignment, size_t bytes, const void *caller)
+{
+  void *mem;
+
+  if (alignment <= MALLOC_ALIGNMENT)
+    return malloc_check (bytes, NULL);
+
+  if (alignment < MINSIZE)
+    alignment = MINSIZE;
 
   /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
      power of 2 and will cause overflow in the check below.  */
@@ -377,17 +414,19 @@
     }
 
   /* Make sure alignment is power of 2.  */
-  if (!powerof2(alignment)) {
-    size_t a = MALLOC_ALIGNMENT * 2;
-    while (a < alignment) a <<= 1;
-    alignment = a;
-  }
-
-  (void)mutex_lock(&main_arena.mutex);
-  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
-    NULL;
-  (void)mutex_unlock(&main_arena.mutex);
-  return mem2mem_check(mem, bytes);
+  if (!powerof2 (alignment))
+    {
+      size_t a = MALLOC_ALIGNMENT * 2;
+      while (a < alignment)
+        a <<= 1;
+      alignment = a;
+    }
+
+  (void) mutex_lock (&main_arena.mutex);
+  mem = (top_check () >= 0) ? _int_memalign (&main_arena, alignment, bytes + 1) :
+        NULL;
+  (void) mutex_unlock (&main_arena.mutex);
+  return mem2mem_check (mem, bytes);
 }
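
(Aside: memalign_check above rejects alignments larger than SIZE_MAX / 2 + 1, which cannot be powers of two and would overflow the doubling loop, and then rounds any other non-power-of-two alignment up. A standalone sketch of that rounding follows; EXAMPLE_MALLOC_ALIGNMENT and the local powerof2 definition are assumptions for illustration only.)

  #include <stddef.h>
  #include <stdint.h>
  #include <stdio.h>

  #define EXAMPLE_MALLOC_ALIGNMENT 16
  #define powerof2(x) ((((x) - 1) & (x)) == 0)

  static size_t
  round_alignment (size_t alignment)
  {
    /* Anything above SIZE_MAX / 2 + 1 cannot be a power of two and the
       doubling below would wrap around; treat it as an error.  */
    if (alignment > SIZE_MAX / 2 + 1)
      return 0;

    if (!powerof2 (alignment))
      {
        size_t a = EXAMPLE_MALLOC_ALIGNMENT * 2;
        while (a < alignment)
          a <<= 1;
        alignment = a;
      }
    return alignment;
  }

  int
  main (void)
  {
    printf ("%zu -> %zu\n", (size_t) 24, round_alignment (24));  /* 24 -> 32 */
    printf ("%zu -> %zu\n", (size_t) 64, round_alignment (64));  /* 64 -> 64 */
    return 0;
  }
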
 
 
@@ -408,59 +447,63 @@
    then the hooks are reset to 0.  */
 
 #define MALLOC_STATE_MAGIC   0x444c4541l
-#define MALLOC_STATE_VERSION (0*0x100l + 4l) /* major*0x100 + minor */
-
-struct malloc_save_state {
-  long          magic;
-  long          version;
-  mbinptr       av[NBINS * 2 + 2];
-  char*         sbrk_base;
-  int           sbrked_mem_bytes;
+#define MALLOC_STATE_VERSION (0 * 0x100l + 4l) /* major*0x100 + minor */
+
+struct malloc_save_state
+{
+  long magic;
+  long version;
+  mbinptr av[NBINS * 2 + 2];
+  char *sbrk_base;
+  int sbrked_mem_bytes;
   unsigned long trim_threshold;
   unsigned long top_pad;
-  unsigned int  n_mmaps_max;
+  unsigned int n_mmaps_max;
   unsigned long mmap_threshold;
-  int           check_action;
+  int check_action;
   unsigned long max_sbrked_mem;
   unsigned long max_total_mem;
-  unsigned int  n_mmaps;
-  unsigned int  max_n_mmaps;
+  unsigned int n_mmaps;
+  unsigned int max_n_mmaps;
   unsigned long mmapped_mem;
   unsigned long max_mmapped_mem;
-  int           using_malloc_checking;
+  int using_malloc_checking;
   unsigned long max_fast;
   unsigned long arena_test;
   unsigned long arena_max;
   unsigned long narenas;
 };
 
-void*
-__malloc_get_state(void)
-{
-  struct malloc_save_state* ms;
+void *
+__malloc_get_state (void)
+{
+  struct malloc_save_state *ms;
   int i;
   mbinptr b;
 
-  ms = (struct malloc_save_state*)__libc_malloc(sizeof(*ms));
+  ms = (struct malloc_save_state *) __libc_malloc (sizeof (*ms));
   if (!ms)
     return 0;
-  (void)mutex_lock(&main_arena.mutex);
-  malloc_consolidate(&main_arena);
+
+  (void) mutex_lock (&main_arena.mutex);
+  malloc_consolidate (&main_arena);
   ms->magic = MALLOC_STATE_MAGIC;
   ms->version = MALLOC_STATE_VERSION;
   ms->av[0] = 0;
   ms->av[1] = 0; /* used to be binblocks, now no longer used */
-  ms->av[2] = top(&main_arena);
+  ms->av[2] = top (&main_arena);
   ms->av[3] = 0; /* used to be undefined */
-  for(i=1; i<NBINS; i++) {
-    b = bin_at(&main_arena, i);
-    if(first(b) == b)
-      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
-    else {
-      ms->av[2*i+2] = first(b);
-      ms->av[2*i+3] = last(b);
-    }
-  }
+  for (i = 1; i < NBINS; i++)
+    {
+      b = bin_at (&main_arena, i);
+      if (first (b) == b)
+        ms->av[2 * i + 2] = ms->av[2 * i + 3] = 0; /* empty bin */
+      else
+        {
+          ms->av[2 * i + 2] = first (b);
+          ms->av[2 * i + 3] = last (b);
+        }
+    }
   ms->sbrk_base = mp_.sbrk_base;
   ms->sbrked_mem_bytes = main_arena.system_mem;
   ms->trim_threshold = mp_.trim_threshold;
@@ -475,78 +518,92 @@
   ms->mmapped_mem = mp_.mmapped_mem;
   ms->max_mmapped_mem = mp_.max_mmapped_mem;
   ms->using_malloc_checking = using_malloc_checking;
-  ms->max_fast = get_max_fast();
+  ms->max_fast = get_max_fast ();
   ms->arena_test = mp_.arena_test;
   ms->arena_max = mp_.arena_max;
   ms->narenas = narenas;
-  (void)mutex_unlock(&main_arena.mutex);
-  return (void*)ms;
+  (void) mutex_unlock (&main_arena.mutex);
+  return (void *) ms;
 }
 
 int
-__malloc_set_state(void* msptr)
-{
-  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
+__malloc_set_state (void *msptr)
+{
+  struct malloc_save_state *ms = (struct malloc_save_state *) msptr;
   size_t i;
   mbinptr b;
 
   disallow_malloc_check = 1;
-  ptmalloc_init();
-  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
+  ptmalloc_init ();
+  if (ms->magic != MALLOC_STATE_MAGIC)
+    return -1;
+
   /* Must fail if the major version is too high. */
-  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
-  (void)mutex_lock(&main_arena.mutex);
+  if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
+    return -2;
+
+  (void) mutex_lock (&main_arena.mutex);
   /* There are no fastchunks.  */
-  clear_fastchunks(&main_arena);
+  clear_fastchunks (&main_arena);
   if (ms->version >= 4)
-    set_max_fast(ms->max_fast);
+    set_max_fast (ms->max_fast);
   else
-    set_max_fast(64);	/* 64 used to be the value we always used.  */
-  for (i=0; i<NFASTBINS; ++i)
+    set_max_fast (64);  /* 64 used to be the value we always used.  */
+  for (i = 0; i < NFASTBINS; ++i)
     fastbin (&main_arena, i) = 0;
-  for (i=0; i<BINMAPSIZE; ++i)
+  for (i = 0; i < BINMAPSIZE; ++i)
     main_arena.binmap[i] = 0;
-  top(&main_arena) = ms->av[2];
+  top (&main_arena) = ms->av[2];
   main_arena.last_remainder = 0;
-  for(i=1; i<NBINS; i++) {
-    b = bin_at(&main_arena, i);
-    if(ms->av[2*i+2] == 0) {
-      assert(ms->av[2*i+3] == 0);
-      first(b) = last(b) = b;
-    } else {
-      if(ms->version >= 3 &&
-	 (i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
-			   largebin_index(chunksize(ms->av[2*i+3]))==i))) {
-	first(b) = ms->av[2*i+2];
-	last(b) = ms->av[2*i+3];
-	/* Make sure the links to the bins within the heap are correct.  */
-	first(b)->bk = b;
-	last(b)->fd = b;
-	/* Set bit in binblocks.  */
-	mark_bin(&main_arena, i);
-      } else {
-	/* Oops, index computation from chunksize must have changed.
-	   Link the whole list into unsorted_chunks.  */
-	first(b) = last(b) = b;
-	b = unsorted_chunks(&main_arena);
-	ms->av[2*i+2]->bk = b;
-	ms->av[2*i+3]->fd = b->fd;
-	b->fd->bk = ms->av[2*i+3];
-	b->fd = ms->av[2*i+2];
-      }
-    }
-  }
-  if (ms->version < 3) {
-    /* Clear fd_nextsize and bk_nextsize fields.  */
-    b = unsorted_chunks(&main_arena)->fd;
-    while (b != unsorted_chunks(&main_arena)) {
-      if (!in_smallbin_range(chunksize(b))) {
-	b->fd_nextsize = NULL;
-	b->bk_nextsize = NULL;
-      }
-      b = b->fd;
-    }
-  }
+  for (i = 1; i < NBINS; i++)
+    {
+      b = bin_at (&main_arena, i);
+      if (ms->av[2 * i + 2] == 0)
+        {
+          assert (ms->av[2 * i + 3] == 0);
+          first (b) = last (b) = b;
+        }
+      else
+        {
+          if (ms->version >= 3 &&
+              (i < NSMALLBINS || (largebin_index (chunksize (ms->av[2 * i + 2])) == i &&
+                                  largebin_index (chunksize (ms->av[2 * i + 3])) == i)))
+            {
+              first (b) = ms->av[2 * i + 2];
+              last (b) = ms->av[2 * i + 3];
+              /* Make sure the links to the bins within the heap are correct.  */
+              first (b)->bk = b;
+              last (b)->fd = b;
+              /* Set bit in binblocks.  */
+              mark_bin (&main_arena, i);
+            }
+          else
+            {
+              /* Oops, index computation from chunksize must have changed.
+                 Link the whole list into unsorted_chunks.  */
+              first (b) = last (b) = b;
+              b = unsorted_chunks (&main_arena);
+              ms->av[2 * i + 2]->bk = b;
+              ms->av[2 * i + 3]->fd = b->fd;
+              b->fd->bk = ms->av[2 * i + 3];
+              b->fd = ms->av[2 * i + 2];
+            }
+        }
+    }
+  if (ms->version < 3)
+    {
+      /* Clear fd_nextsize and bk_nextsize fields.  */
+      b = unsorted_chunks (&main_arena)->fd;
+      while (b != unsorted_chunks (&main_arena))
+        {
+          if (!in_smallbin_range (chunksize (b)))
+            {
+              b->fd_nextsize = NULL;
+              b->bk_nextsize = NULL;
+            }
+          b = b->fd;
+        }
+    }
   mp_.sbrk_base = ms->sbrk_base;
   main_arena.system_mem = ms->sbrked_mem_bytes;
   mp_.trim_threshold = ms->trim_threshold;
@@ -560,28 +617,31 @@
   mp_.mmapped_mem = ms->mmapped_mem;
   mp_.max_mmapped_mem = ms->max_mmapped_mem;
   /* add version-dependent code here */
-  if (ms->version >= 1) {
-    /* Check whether it is safe to enable malloc checking, or whether
-       it is necessary to disable it.  */
-    if (ms->using_malloc_checking && !using_malloc_checking &&
-	!disallow_malloc_check)
-      __malloc_check_init ();
-    else if (!ms->using_malloc_checking && using_malloc_checking) {
-      __malloc_hook = NULL;
-      __free_hook = NULL;
-      __realloc_hook = NULL;
-      __memalign_hook = NULL;
-      using_malloc_checking = 0;
-    }
-  }
-  if (ms->version >= 4) {
-    mp_.arena_test = ms->arena_test;
-    mp_.arena_max = ms->arena_max;
-    narenas = ms->narenas;
-  }
-  check_malloc_state(&main_arena);
-
-  (void)mutex_unlock(&main_arena.mutex);
+  if (ms->version >= 1)
+    {
+      /* Check whether it is safe to enable malloc checking, or whether
+         it is necessary to disable it.  */
+      if (ms->using_malloc_checking && !using_malloc_checking &&
+          !disallow_malloc_check)
+        __malloc_check_init ();
+      else if (!ms->using_malloc_checking && using_malloc_checking)
+        {
+          __malloc_hook = NULL;
+          __free_hook = NULL;
+          __realloc_hook = NULL;
+          __memalign_hook = NULL;
+          using_malloc_checking = 0;
+        }
+    }
+  if (ms->version >= 4)
+    {
+      mp_.arena_test = ms->arena_test;
+      mp_.arena_max = ms->arena_max;
+      narenas = ms->narenas;
+    }
+  check_malloc_state (&main_arena);
+
+  (void) mutex_unlock (&main_arena.mutex);
   return 0;
 }
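
(Aside: MALLOC_STATE_VERSION above packs the version as major*0x100 + minor, and __malloc_set_state rejects a saved state only when the major part is newer than its own. A standalone sketch of that compatibility check; VERSION, CURRENT_VERSION and accept_saved_state are illustrative names, not glibc identifiers.)

  #include <stdio.h>

  #define VERSION(major, minor) ((major) * 0x100L + (minor))
  #define CURRENT_VERSION VERSION (0, 4)

  static int
  accept_saved_state (long saved_version)
  {
    /* Compare only the major part, as __malloc_set_state does.  */
    return (saved_version & ~0xFFL) <= (CURRENT_VERSION & ~0xFFL);
  }

  int
  main (void)
  {
    printf ("v0.3: %d\n", accept_saved_state (VERSION (0, 3)));  /* 1: accepted */
    printf ("v0.9: %d\n", accept_saved_state (VERSION (0, 9)));  /* 1: accepted */
    printf ("v1.0: %d\n", accept_saved_state (VERSION (1, 0)));  /* 0: rejected */
    return 0;
  }
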
 

Modified: fsf/trunk/libc/malloc/malloc.c
==============================================================================
--- fsf/trunk/libc/malloc/malloc.c (original)
+++ fsf/trunk/libc/malloc/malloc.c Fri Jan  3 00:01:54 2014
@@ -353,10 +353,10 @@
    malloc_set_state than will returning blocks not adequately aligned for
    long double objects under -mlong-double-128.  */
 
-#  define MALLOC_ALIGNMENT       (2 * SIZE_SZ < __alignof__ (long double) \
-				  ? __alignof__ (long double) : 2 * SIZE_SZ)
+#  define MALLOC_ALIGNMENT       (2 *SIZE_SZ < __alignof__ (long double)      \
+                                  ? __alignof__ (long double) : 2 *SIZE_SZ)
 # else
-#  define MALLOC_ALIGNMENT       (2 * SIZE_SZ)
+#  define MALLOC_ALIGNMENT       (2 *SIZE_SZ)
 # endif
 #endif
 
@@ -463,10 +463,10 @@
      some systems, if the application first decrements and then
      increments the break value, the contents of the reallocated space
      are unspecified.
-*/
+ */
 
 #ifndef MORECORE_CLEARS
-#define MORECORE_CLEARS 1
+# define MORECORE_CLEARS 1
 #endif
 
 
@@ -1232,11 +1232,11 @@
    Check if a request is so large that it would wrap around zero when
    padded and aligned. To simplify some other code, the bound is made
    low enough so that adding MINSIZE will also not wrap around zero.
-*/
+ */
 
 #define REQUEST_OUT_OF_RANGE(req)                                 \
-  ((unsigned long)(req) >=                                        \
-   (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))
+  ((unsigned long) (req) >=						      \
+   (unsigned long) (INTERNAL_SIZE_T) (-2 * MINSIZE))
 
 /* pad request bytes into a usable size -- internal version */
 
@@ -1248,15 +1248,15 @@
 /*  Same, except also perform argument check */
 
 #define checked_request2size(req, sz)                             \
-  if (REQUEST_OUT_OF_RANGE(req)) {                                \
-    __set_errno (ENOMEM);					  \
-    return 0;                                                     \
-  }                                                               \
-  (sz) = request2size(req);
-
-/*
-  --------------- Physical chunk operations ---------------
-*/
+  if (REQUEST_OUT_OF_RANGE (req)) {					      \
+      __set_errno (ENOMEM);						      \
+      return 0;								      \
+    }									      \
+  (sz) = request2size (req);
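
(Aside: checked_request2size above pads the request with one size field, rounds up to the malloc alignment, and first refuses requests so large that the padding would wrap around zero. A standalone sketch with assumed constants -- 8-byte size fields, 16-byte alignment, 32-byte minimum chunk -- since the real values depend on INTERNAL_SIZE_T and MALLOC_ALIGNMENT; the EX_* names are illustrative.)

  #include <stddef.h>
  #include <stdio.h>

  #define EX_SIZE_SZ    8UL
  #define EX_ALIGN_MASK 15UL             /* MALLOC_ALIGN_MASK */
  #define EX_MINSIZE    32UL

  /* Reject requests that would wrap after padding, with enough slack
     that adding MINSIZE cannot wrap either.  */
  #define request_out_of_range(req) \
    ((unsigned long) (req) >= (unsigned long) -(2 * EX_MINSIZE))

  /* Pad by one size field, round up to the alignment, never go below
     the minimum chunk size.  */
  static unsigned long
  request2size_sketch (size_t req)
  {
    if (req + EX_SIZE_SZ + EX_ALIGN_MASK < EX_MINSIZE)
      return EX_MINSIZE;
    return (req + EX_SIZE_SZ + EX_ALIGN_MASK) & ~EX_ALIGN_MASK;
  }

  int
  main (void)
  {
    size_t reqs[] = { 1, 24, 100 };
    for (int i = 0; i < 3; i++)
      {
        if (request_out_of_range (reqs[i]))
          puts ("ENOMEM");
        else
          printf ("%zu -> %lu bytes\n", reqs[i], request2size_sketch (reqs[i]));
      }
    return 0;
  }
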
+
+/*
+   --------------- Physical chunk operations ---------------
+ */
 
 
 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
@@ -1283,49 +1283,49 @@
 
 
 /*
-  Bits to mask off when extracting size
-
-  Note: IS_MMAPPED is intentionally not masked off from size field in
-  macros for which mmapped chunks should never be seen. This should
-  cause helpful core dumps to occur if it is tried by accident by
-  people extending or adapting this malloc.
-*/
-#define SIZE_BITS (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA)
+   Bits to mask off when extracting size
+
+   Note: IS_MMAPPED is intentionally not masked off from size field in
+   macros for which mmapped chunks should never be seen. This should
+   cause helpful core dumps to occur if it is tried by accident by
+   people extending or adapting this malloc.
+ */
+#define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
 
 /* Get size, ignoring use bits */
 #define chunksize(p)         ((p)->size & ~(SIZE_BITS))
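
(Aside: because every chunk size is a multiple of the malloc alignment, its low bits are always zero, which is why PREV_INUSE, IS_MMAPPED and NON_MAIN_ARENA can live in the size field and chunksize() simply masks them off. A standalone sketch of that packing, using a plain unsigned long in place of a chunk header.)

  #include <stdio.h>

  #define PREV_INUSE       0x1UL
  #define IS_MMAPPED       0x2UL
  #define NON_MAIN_ARENA   0x4UL
  #define SIZE_BITS        (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)

  int
  main (void)
  {
    unsigned long size_field = 96UL | PREV_INUSE;  /* 96-byte chunk, prev in use */

    unsigned long size = size_field & ~SIZE_BITS;  /* chunksize(): 96 */
    int prev_inuse     = (size_field & PREV_INUSE) != 0;
    int mmapped        = (size_field & IS_MMAPPED) != 0;

    printf ("size=%lu prev_inuse=%d mmapped=%d\n", size, prev_inuse, mmapped);
    return 0;
  }
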
 
 
 /* Ptr to next physical malloc_chunk. */
-#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))
+#define next_chunk(p) ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))
 
 /* Ptr to previous physical malloc_chunk */
-#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
+#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - ((p)->prev_size)))
 
 /* Treat space at ptr + offset as a chunk */
-#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
+#define chunk_at_offset(p, s)  ((mchunkptr) (((char *) (p)) + (s)))
 
 /* extract p's inuse bit */
-#define inuse(p)\
-((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
+#define inuse(p)							      \
+  ((((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
 
 /* set/clear chunk as being inuse without otherwise disturbing */
-#define set_inuse(p)\
-((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
-
-#define clear_inuse(p)\
-((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
+#define set_inuse(p)							      \
+  ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
+
+#define clear_inuse(p)							      \
+  ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
 
 
 /* check/set/clear inuse bits in known places */
-#define inuse_bit_at_offset(p, s)\
- (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
-
-#define set_inuse_bit_at_offset(p, s)\
- (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
-
-#define clear_inuse_bit_at_offset(p, s)\
- (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
+#define inuse_bit_at_offset(p, s)					      \
+  (((mchunkptr) (((char *) (p)) + (s)))->size & PREV_INUSE)
+
+#define set_inuse_bit_at_offset(p, s)					      \
+  (((mchunkptr) (((char *) (p)) + (s)))->size |= PREV_INUSE)
+
+#define clear_inuse_bit_at_offset(p, s)					      \
+  (((mchunkptr) (((char *) (p)) + (s)))->size &= ~(PREV_INUSE))
 
 
 /* Set size at head, without disturbing its use bit */
@@ -1335,26 +1335,26 @@
 #define set_head(p, s)       ((p)->size = (s))
 
 /* Set size at footer (only when chunk is not in use) */
-#define set_foot(p, s)       (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
-
-
-/*
-  -------------------- Internal data structures --------------------
+#define set_foot(p, s)       (((mchunkptr) ((char *) (p) + (s)))->prev_size = (s))
+
+
+/*
+   -------------------- Internal data structures --------------------
 
    All internal state is held in an instance of malloc_state defined
    below. There are no other static variables, except in two optional
    cases:
-   * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
-   * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
+ * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
+ * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
      for mmap.
 
    Beware of lots of tricks that minimize the total bookkeeping space
    requirements. The result is a little over 1K bytes (for 4byte
    pointers and size_t.)
-*/
-
-/*
-  Bins
+ */
+
+/*
+   Bins
 
     An array of bin headers for free chunks. Each bin is doubly
     linked.  The bins are approximately proportionally (log) spaced.
@@ -1387,17 +1387,17 @@
     But to conserve space and improve locality, we allocate
     only the fd/bk pointers of bins, and then use repositioning tricks
     to treat these as the fields of a malloc_chunk*.
-*/
-
-typedef struct malloc_chunk* mbinptr;
+ */
+
+typedef struct malloc_chunk *mbinptr;
 
 /* addressing -- note that bin_at(0) does not exist */
 #define bin_at(m, i) \
   (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2]))			      \
-	     - offsetof (struct malloc_chunk, fd))
+             - offsetof (struct malloc_chunk, fd))
 
 /* analog of ++bin */
-#define next_bin(b)  ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
+#define next_bin(b)  ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
 
 /* Reminders about list directionality within bins */
 #define first(b)     ((b)->fd)
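
(Aside: the "repositioning trick" mentioned above is what bin_at implements -- only fd/bk pointer pairs are stored, and offsetof is used to step back to where a full chunk header would begin, so the pair can be addressed through the usual fd/bk fields. A standalone sketch with a simplified chunk layout; the struct and bin_at here are illustrative, not the glibc definitions.)

  #include <stddef.h>
  #include <stdio.h>

  struct chunk
  {
    size_t prev_size;
    size_t size;
    struct chunk *fd;
    struct chunk *bk;
  };

  #define NBINS 4

  /* Two pointers per bin, the way main_arena.bins[] stores them.  */
  static struct chunk *bins[NBINS * 2];

  /* Pretend &bins[i * 2] is the fd field of a chunk and step back to
     where that chunk's header would begin.  Only fd/bk of the result
     may be touched; prev_size/size overlap the previous bin's slots.  */
  #define bin_at(i) \
    ((struct chunk *) ((char *) &bins[(i) * 2] - offsetof (struct chunk, fd)))

  int
  main (void)
  {
    for (int i = 0; i < NBINS; i++)
      {
        struct chunk *b = bin_at (i);
        b->fd = b->bk = b;             /* circular: bin i is empty */
      }

    struct chunk *b = bin_at (2);
    printf ("bin 2 empty: %d\n", b->fd == b);                             /* 1 */
    printf ("fd slot matches array: %d\n",
            (void *) &b->fd == (void *) &bins[4]);                        /* 1 */
    return 0;
  }
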
@@ -1405,36 +1405,36 @@
 
 /* Take a chunk off a bin list */
 #define unlink(P, BK, FD) {                                            \
-  FD = P->fd;                                                          \
-  BK = P->bk;                                                          \
-  if (__builtin_expect (FD->bk != P || BK->fd != P, 0))                \
-    malloc_printerr (check_action, "corrupted double-linked list", P); \
-  else {                                                               \
-    FD->bk = BK;                                                       \
-    BK->fd = FD;                                                       \
-    if (!in_smallbin_range (P->size)				       \
-	&& __builtin_expect (P->fd_nextsize != NULL, 0)) {	       \
-      assert (P->fd_nextsize->bk_nextsize == P);		       \
-      assert (P->bk_nextsize->fd_nextsize == P);		       \
-      if (FD->fd_nextsize == NULL) {				       \
-	if (P->fd_nextsize == P)				       \
-	  FD->fd_nextsize = FD->bk_nextsize = FD;		       \
-	else {							       \
-	  FD->fd_nextsize = P->fd_nextsize;			       \
-	  FD->bk_nextsize = P->bk_nextsize;			       \
-	  P->fd_nextsize->bk_nextsize = FD;			       \
-	  P->bk_nextsize->fd_nextsize = FD;			       \
-	}							       \
-      }	else {							       \
-	P->fd_nextsize->bk_nextsize = P->bk_nextsize;		       \
-	P->bk_nextsize->fd_nextsize = P->fd_nextsize;		       \
-      }								       \
-    }								       \
-  }                                                                    \
+    FD = P->fd;								      \
+    BK = P->bk;								      \
+    if (__builtin_expect (FD->bk != P || BK->fd != P, 0))		      \
+      malloc_printerr (check_action, "corrupted double-linked list", P);      \
+    else {								      \
+        FD->bk = BK;							      \
+        BK->fd = FD;							      \
+        if (!in_smallbin_range (P->size)				      \
+            && __builtin_expect (P->fd_nextsize != NULL, 0)) {		      \
+            assert (P->fd_nextsize->bk_nextsize == P);			      \
+            assert (P->bk_nextsize->fd_nextsize == P);			      \
+            if (FD->fd_nextsize == NULL) {				      \
+                if (P->fd_nextsize == P)				      \
+                  FD->fd_nextsize = FD->bk_nextsize = FD;		      \
+                else {							      \
+                    FD->fd_nextsize = P->fd_nextsize;			      \
+                    FD->bk_nextsize = P->bk_nextsize;			      \
+                    P->fd_nextsize->bk_nextsize = FD;			      \
+                    P->bk_nextsize->fd_nextsize = FD;			      \
+                  }							      \
+              } else {							      \
+                P->fd_nextsize->bk_nextsize = P->bk_nextsize;		      \
+                P->bk_nextsize->fd_nextsize = P->fd_nextsize;		      \
+              }								      \
+          }								      \
+      }									      \
 }
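
(Aside: the unlink macro above refuses to splice a chunk out unless both neighbours still point back at it, so a corrupted double-linked list is reported rather than followed. A standalone sketch of that check on a plain circular list; the large-bin fd_nextsize/bk_nextsize handling is deliberately omitted.)

  #include <stdio.h>
  #include <stdlib.h>

  struct node
  {
    struct node *fd;
    struct node *bk;
  };

  static void
  unlink_checked (struct node *p)
  {
    struct node *fd = p->fd;
    struct node *bk = p->bk;

    if (fd->bk != p || bk->fd != p)
      {
        fprintf (stderr, "corrupted double-linked list\n");
        abort ();
      }
    fd->bk = bk;
    bk->fd = fd;
  }

  int
  main (void)
  {
    struct node sentinel, a, b;

    /* sentinel <-> a <-> b, circular.  */
    sentinel.fd = &a;  a.fd = &b;  b.fd = &sentinel;
    sentinel.bk = &b;  b.bk = &a;  a.bk = &sentinel;

    unlink_checked (&a);
    printf ("list skips a: %d\n",
            sentinel.fd == &b && b.bk == &sentinel);   /* 1 */
    return 0;
  }
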
 
 /*
-  Indexing
+   Indexing
 
     Bins for sizes < 512 bytes contain chunks of all the same size, spaced
     8 bytes apart. Larger bins are approximately logarithmically spaced:
@@ -1455,7 +1455,7 @@
 
     Bin 0 does not exist.  Bin 1 is the unordered list; if that would be
     a valid chunk size the small bins are bumped up one.
-*/
+ */
 
 #define NBINS             128
 #define NSMALLBINS         64
@@ -1464,38 +1464,38 @@
 #define MIN_LARGE_SIZE    ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
 
 #define in_smallbin_range(sz)  \
-  ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
+  ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
 
 #define smallbin_index(sz) \
-  ((SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3)) \
+  ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
    + SMALLBIN_CORRECTION)
 
 #define largebin_index_32(sz)                                                \
-(((((unsigned long)(sz)) >>  6) <= 38)?  56 + (((unsigned long)(sz)) >>  6): \
- ((((unsigned long)(sz)) >>  9) <= 20)?  91 + (((unsigned long)(sz)) >>  9): \
- ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
- ((((unsigned long)(sz)) >> 15) <=  4)? 119 + (((unsigned long)(sz)) >> 15): \
- ((((unsigned long)(sz)) >> 18) <=  2)? 124 + (((unsigned long)(sz)) >> 18): \
-					126)
+  (((((unsigned long) (sz)) >> 6) <= 38) ?  56 + (((unsigned long) (sz)) >> 6) :\
+   ((((unsigned long) (sz)) >> 9) <= 20) ?  91 + (((unsigned long) (sz)) >> 9) :\
+   ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
+   ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
+   ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
+   126)
 
 #define largebin_index_32_big(sz)                                            \
-(((((unsigned long)(sz)) >>  6) <= 45)?  49 + (((unsigned long)(sz)) >>  6): \
- ((((unsigned long)(sz)) >>  9) <= 20)?  91 + (((unsigned long)(sz)) >>  9): \
- ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
- ((((unsigned long)(sz)) >> 15) <=  4)? 119 + (((unsigned long)(sz)) >> 15): \
- ((((unsigned long)(sz)) >> 18) <=  2)? 124 + (((unsigned long)(sz)) >> 18): \
-                                        126)
+  (((((unsigned long) (sz)) >> 6) <= 45) ?  49 + (((unsigned long) (sz)) >> 6) :\
+   ((((unsigned long) (sz)) >> 9) <= 20) ?  91 + (((unsigned long) (sz)) >> 9) :\
+   ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
+   ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
+   ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
+   126)
 
 // XXX It remains to be seen whether it is good to keep the widths of
 // XXX the buckets the same or whether it should be scaled by a factor
 // XXX of two as well.
 #define largebin_index_64(sz)                                                \
-(((((unsigned long)(sz)) >>  6) <= 48)?  48 + (((unsigned long)(sz)) >>  6): \
- ((((unsigned long)(sz)) >>  9) <= 20)?  91 + (((unsigned long)(sz)) >>  9): \
- ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
- ((((unsigned long)(sz)) >> 15) <=  4)? 119 + (((unsigned long)(sz)) >> 15): \
- ((((unsigned long)(sz)) >> 18) <=  2)? 124 + (((unsigned long)(sz)) >> 18): \
-					126)
+  (((((unsigned long) (sz)) >> 6) <= 48) ?  48 + (((unsigned long) (sz)) >> 6) :\
+   ((((unsigned long) (sz)) >> 9) <= 20) ?  91 + (((unsigned long) (sz)) >> 9) :\
+   ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
+   ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
+   ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
+   126)
 
 #define largebin_index(sz) \
   (SIZE_SZ == 8 ? largebin_index_64 (sz)                                     \
@@ -1503,11 +1503,11 @@
    : largebin_index_32 (sz))
 
 #define bin_index(sz) \
- ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
-
-
-/*
-  Unsorted chunks
+  ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
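
(Aside: on 64-bit targets the small bins cover sizes below MIN_LARGE_SIZE in 16-byte steps, and largebin_index_64 maps larger sizes into progressively coarser, roughly logarithmic buckets. The following standalone function re-implements that index computation with the thresholds copied from the macro above, just to show where a few sizes land; bin_index_64 is an illustrative name, not a glibc identifier.)

  #include <stdio.h>

  #define MIN_LARGE_SIZE 1024UL   /* 64 small bins * 16-byte spacing */

  static unsigned int
  bin_index_64 (unsigned long sz)
  {
    if (sz < MIN_LARGE_SIZE)
      return sz >> 4;                                 /* smallbin_index */

    return ((sz >> 6) <= 48) ?  48 + (sz >> 6) :      /* 64-byte steps   */
           ((sz >> 9) <= 20) ?  91 + (sz >> 9) :      /* 512-byte steps  */
           ((sz >> 12) <= 10) ? 110 + (sz >> 12) :    /* 4 KiB steps     */
           ((sz >> 15) <= 4) ? 119 + (sz >> 15) :     /* 32 KiB steps    */
           ((sz >> 18) <= 2) ? 124 + (sz >> 18) :     /* 256 KiB steps   */
           126;
  }

  int
  main (void)
  {
    unsigned long sizes[] = { 32, 512, 1024, 4096, 65536, 1 << 20 };
    for (int i = 0; i < 6; i++)
      printf ("%8lu -> bin %u\n", sizes[i], bin_index_64 (sizes[i]));
    return 0;
  }
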
+
+
+/*
+   Unsorted chunks
 
     All remainders from chunk splits, as well as all returned chunks,
     are first placed in the "unsorted" bin. They are then placed
@@ -1518,13 +1518,13 @@
 
     The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
     does not have to be taken into account in size comparisons.
-*/
+ */
 
 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
-#define unsorted_chunks(M)          (bin_at(M, 1))
-
-/*
-  Top
+#define unsorted_chunks(M)          (bin_at (M, 1))
+
+/*
+   Top
 
     The top-most available chunk (i.e., the one bordering the end of
     available memory) is treated specially. It is never included in
@@ -1539,13 +1539,13 @@
     interval between initialization and the first call to
     sysmalloc. (This is somewhat delicate, since it relies on
     the 2 preceding words to be zero during this interval as well.)
-*/
+ */
 
 /* Conveniently, the unsorted bin can be used as dummy top on first call */
-#define initial_top(M)              (unsorted_chunks(M))
-
-/*
-  Binmap
+#define initial_top(M)              (unsorted_chunks (M))
+
+/*
+   Binmap
 
     To help compensate for the large number of bins, a one-level index
     structure is used for bin-by-bin searching.  `binmap' is a
@@ -1553,7 +1553,7 @@
     be skipped over during during traversals.  The bits are NOT always
     cleared as soon as bins are empty, but instead only
     when they are noticed to be empty during traversal in malloc.
-*/
+ */
 
 /* Conservatively use 32 bits per map word, even if on 64bit system */
 #define BINMAPSHIFT      5
@@ -1561,14 +1561,14 @@
 #define BINMAPSIZE       (NBINS / BITSPERMAP)
 
 #define idx2block(i)     ((i) >> BINMAPSHIFT)
-#define idx2bit(i)       ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
-
-#define mark_bin(m,i)    ((m)->binmap[idx2block(i)] |=  idx2bit(i))
-#define unmark_bin(m,i)  ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
-#define get_binmap(m,i)  ((m)->binmap[idx2block(i)] &   idx2bit(i))
-
-/*
-  Fastbins
+#define idx2bit(i)       ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
+
+#define mark_bin(m, i)    ((m)->binmap[idx2block (i)] |= idx2bit (i))
+#define unmark_bin(m, i)  ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
+#define get_binmap(m, i)  ((m)->binmap[idx2block (i)] & idx2bit (i))
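
(Aside: the binmap is just a small bitmap over the 128 bins, 32 bits per map word, letting malloc skip whole runs of bins that are known to be empty. A standalone sketch of the same shift arithmetic; the arena argument is dropped and a file-scope array stands in for main_arena.binmap.)

  #include <stdio.h>

  #define NBINS        128
  #define BINMAPSHIFT  5
  #define BITSPERMAP   (1U << BINMAPSHIFT)      /* 32 bits per map word */
  #define BINMAPSIZE   (NBINS / BITSPERMAP)     /* 4 words */

  #define idx2block(i)  ((i) >> BINMAPSHIFT)
  #define idx2bit(i)    (1U << ((i) & (BITSPERMAP - 1)))

  static unsigned int binmap[BINMAPSIZE];

  #define mark_bin(i)    (binmap[idx2block (i)] |= idx2bit (i))
  #define unmark_bin(i)  (binmap[idx2block (i)] &= ~idx2bit (i))
  #define get_binmap(i)  (binmap[idx2block (i)] & idx2bit (i))

  int
  main (void)
  {
    mark_bin (70);                     /* bin 70 lives in word 2, bit 6 */
    printf ("bin 70 marked: %d\n", get_binmap (70) != 0);   /* 1 */
    printf ("bin 71 marked: %d\n", get_binmap (71) != 0);   /* 0 */
    unmark_bin (70);
    printf ("bin 70 marked: %d\n", get_binmap (70) != 0);   /* 0 */
    return 0;
  }
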
+
+/*
+   Fastbins
 
     An array of lists holding recently freed small chunks.  Fastbins
     are not doubly linked.  It is faster to single-link them, and
@@ -1582,69 +1582,69 @@
     be consolidated with other free chunks. malloc_consolidate
     releases all chunks in fastbins and consolidates them with
     other free chunks.
-*/
-
-typedef struct malloc_chunk* mfastbinptr;
+ */
+
+typedef struct malloc_chunk *mfastbinptr;
 #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
 
 /* offset 2 to use otherwise unindexable first 2 bins */
 #define fastbin_index(sz) \
-  ((((unsigned int)(sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
+  ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
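
(Aside: fastbins are single-linked LIFO lists of same-sized chunks, indexed by (size >> 4) - 2 with 8-byte size fields, so freeing and reusing a small chunk only touches the list head. A standalone sketch of that push/pop discipline; the struct, EX_NFASTBINS and the *_sketch helpers are illustrative, and the real code additionally uses atomics and the FASTCHUNKS_BIT flag.)

  #include <stdio.h>
  #include <stddef.h>

  struct chunk
  {
    size_t size;
    struct chunk *fd;          /* next chunk in the same fastbin */
  };

  #define EX_NFASTBINS 10

  static struct chunk *fastbins[EX_NFASTBINS];

  /* (size >> 4) - 2, as fastbin_index does for 8-byte size fields.  */
  static unsigned int
  fastbin_index_sketch (size_t sz)
  {
    return ((unsigned int) sz >> 4) - 2;
  }

  static void
  fastbin_push (struct chunk *p)       /* "free" a small chunk */
  {
    unsigned int idx = fastbin_index_sketch (p->size);
    p->fd = fastbins[idx];
    fastbins[idx] = p;
  }

  static struct chunk *
  fastbin_pop (size_t sz)              /* reuse the most recently freed one */
  {
    unsigned int idx = fastbin_index_sketch (sz);
    struct chunk *p = fastbins[idx];
    if (p != NULL)
      fastbins[idx] = p->fd;
    return p;
  }

  int
  main (void)
  {
    struct chunk a = { 48, NULL }, b = { 48, NULL };

    fastbin_push (&a);
    fastbin_push (&b);                                        /* b is on top */
    printf ("pop: %s\n", fastbin_pop (48) == &b ? "b" : "a"); /* LIFO: b */
    return 0;
  }
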
 
 
 /* The maximum fastbin request size we support */
 #define MAX_FAST_SIZE     (80 * SIZE_SZ / 4)
 
-#define NFASTBINS  (fastbin_index(request2size(MAX_FAST_SIZE))+1)
-
-/*
-  FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
-  that triggers automatic consolidation of possibly-surrounding
-  fastbin chunks. This is a heuristic, so the exact value should not
-  matter too much. It is defined at half the default trim threshold as a
-  compromise heuristic to only attempt consolidation if it is likely
-  to lead to trimming. However, it is not dynamically tunable, since
-  consolidation reduces fragmentation surrounding large chunks even
-  if trimming is not used.
-*/
+#define NFASTBINS  (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
+
+/*
+   FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
+   that triggers automatic consolidation of possibly-surrounding
+   fastbin chunks. This is a heuristic, so the exact value should not
+   matter too much. It is defined at half the default trim threshold as a
+   compromise heuristic to only attempt consolidation if it is likely
+   to lead to trimming. However, it is not dynamically tunable, since
+   consolidation reduces fragmentation surrounding large chunks even
+   if trimming is not used.
+ */
 
 #define FASTBIN_CONSOLIDATION_THRESHOLD  (65536UL)
 
 /*
-  Since the lowest 2 bits in max_fast don't matter in size comparisons,
-  they are used as flags.
-*/
-
-/*
-  FASTCHUNKS_BIT held in max_fast indicates that there are probably
-  some fastbin chunks. It is set true on entering a chunk into any
-  fastbin, and cleared only in malloc_consolidate.
-
-  The truth value is inverted so that have_fastchunks will be true
-  upon startup (since statics are zero-filled), simplifying
-  initialization checks.
-*/
+   Since the lowest 2 bits in max_fast don't matter in size comparisons,
+   they are used as flags.
+ */
+
+/*
+   FASTCHUNKS_BIT held in max_fast indicates that there are probably
+   some fastbin chunks. It is set true on entering a chunk into any
+   fastbin, and cleared only in malloc_consolidate.
+
+   The truth value is inverted so that have_fastchunks will be true
+   upon startup (since statics are zero-filled), simplifying
+   initialization checks.
+ */
 
 #define FASTCHUNKS_BIT        (1U)
 
-#define have_fastchunks(M)     (((M)->flags &  FASTCHUNKS_BIT) == 0)
+#define have_fastchunks(M)     (((M)->flags & FASTCHUNKS_BIT) == 0)
 #define clear_fastchunks(M)    catomic_or (&(M)->flags, FASTCHUNKS_BIT)
 #define set_fastchunks(M)      catomic_and (&(M)->flags, ~FASTCHUNKS_BIT)
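
(Aside: the inverted FASTCHUNKS_BIT convention above means a zero flag word -- which is what a zero-filled static gives you -- already reads as "there may be fastbin chunks", so no explicit initialization is needed before the first malloc_consolidate. A standalone sketch of that inverted-bit trick, with plain operators instead of the catomic_* calls.)

  #include <stdio.h>

  #define FASTCHUNKS_BIT 1U

  static unsigned int flags;           /* zero-initialized, like main_arena */

  #define have_fastchunks()  ((flags & FASTCHUNKS_BIT) == 0)
  #define clear_fastchunks() (flags |= FASTCHUNKS_BIT)   /* "none left"   */
  #define set_fastchunks()   (flags &= ~FASTCHUNKS_BIT)  /* "maybe some"  */

  int
  main (void)
  {
    printf ("at startup: %d\n", have_fastchunks ());          /* 1, no init */
    clear_fastchunks ();
    printf ("after consolidate: %d\n", have_fastchunks ());   /* 0 */
    set_fastchunks ();
    printf ("after a fastbin free: %d\n", have_fastchunks ()); /* 1 */
    return 0;
  }
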
 
 /*
-  NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
-  regions.  Otherwise, contiguity is exploited in merging together,
-  when possible, results from consecutive MORECORE calls.
-
-  The initial value comes from MORECORE_CONTIGUOUS, but is
-  changed dynamically if mmap is ever used as an sbrk substitute.
-*/
+   NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
+   regions.  Otherwise, contiguity is exploited in merging together,
+   when possible, results from consecutive MORECORE calls.
+
+   The initial value comes from MORECORE_CONTIGUOUS, but is
+   changed dynamically if mmap is ever used as an sbrk substitute.
+ */
 
 #define NONCONTIGUOUS_BIT     (2U)
 
-#define contiguous(M)          (((M)->flags &  NONCONTIGUOUS_BIT) == 0)
-#define noncontiguous(M)       (((M)->flags &  NONCONTIGUOUS_BIT) != 0)
-#define set_noncontiguous(M)   ((M)->flags |=  NONCONTIGUOUS_BIT)
+#define contiguous(M)          (((M)->flags & NONCONTIGUOUS_BIT) == 0)
+#define noncontiguous(M)       (((M)->flags & NONCONTIGUOUS_BIT) != 0)
+#define set_noncontiguous(M)   ((M)->flags |= NONCONTIGUOUS_BIT)
 #define set_contiguous(M)      ((M)->flags &= ~NONCONTIGUOUS_BIT)
 
 /*
@@ -1652,19 +1652,20 @@
    Use impossibly small value if 0.
    Precondition: there are no existing fastbin chunks.
    Setting the value clears fastchunk bit but preserves noncontiguous bit.
-*/
+ */
 
 #define set_max_fast(s) \
   global_max_fast = (((s) == 0)						      \
-		     ? SMALLBIN_WIDTH: ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
+                     ? SMALLBIN_WIDTH : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
 #define get_max_fast() global_max_fast
 
 
 /*
    ----------- Internal state representation and initialization -----------
-*/
-
-struct malloc_state {
+ */
+
+struct malloc_state
+{
   /* Serialize access.  */
   mutex_t mutex;
 
@@ -1677,19 +1678,19 @@
 #endif
 
   /* Fastbins */
-  mfastbinptr      fastbinsY[NFASTBINS];
+  mfastbinptr fastbinsY[NFASTBINS];
 
   /* Base of the topmost chunk -- not otherwise kept in a bin */
-  mchunkptr        top;
+  mchunkptr top;
 
   /* The remainder from the most recent split of a small request */
-  mchunkptr        last_remainder;
+  mchunkptr last_remainder;
 
   /* Normal bins packed as described above */
-  mchunkptr        bins[NBINS * 2 - 2];
+  mchunkptr bins[NBINS * 2 - 2];
 
   /* Bitmap of bins */
-  unsigned int     binmap[BINMAPSIZE];
+  unsigned int binmap[BINMAPSIZE];
 
   /* Linked list */
   struct malloc_state *next;
@@ -1702,32 +1703,33 @@
   INTERNAL_SIZE_T max_system_mem;
 };
 
-struct malloc_par {
+struct malloc_par
+{
   /* Tunable parameters */
-  unsigned long    trim_threshold;
-  INTERNAL_SIZE_T  top_pad;
-  INTERNAL_SIZE_T  mmap_threshold;
-  INTERNAL_SIZE_T  arena_test;
-  INTERNAL_SIZE_T  arena_max;
+  unsigned long trim_threshold;
+  INTERNAL_SIZE_T top_pad;
+  INTERNAL_SIZE_T mmap_threshold;
+  INTERNAL_SIZE_T arena_test;
+  INTERNAL_SIZE_T arena_max;
 
   /* Memory map support */
-  int              n_mmaps;
-  int              n_mmaps_max;
-  int              max_n_mmaps;
+  int n_mmaps;
+  int n_mmaps_max;
+  int max_n_mmaps;
   /* the mmap_threshold is dynamic, until the user sets
      it manually, at which point we need to disable any
      dynamic behavior. */
-  int              no_dyn_threshold;
+  int no_dyn_threshold;
 
   /* Statistics */
-  INTERNAL_SIZE_T  mmapped_mem;
+  INTERNAL_SIZE_T mmapped_mem;
   /*INTERNAL_SIZE_T  sbrked_mem;*/
   /*INTERNAL_SIZE_T  max_sbrked_mem;*/
-  INTERNAL_SIZE_T  max_mmapped_mem;
-  INTERNAL_SIZE_T  max_total_mem; /* only kept for NO_THREADS */
+  INTERNAL_SIZE_T max_mmapped_mem;
+  INTERNAL_SIZE_T max_total_mem;  /* only kept for NO_THREADS */
 
   /* First address handed out by MORECORE/sbrk.  */
-  char*            sbrk_base;
+  char *sbrk_base;
 };
 
 /* There are several instances of this struct ("arenas") in this
@@ -1737,22 +1739,22 @@
    is initialized to all zeroes (as is true of C statics).  */
 
 static struct malloc_state main_arena =
-  {
-    .mutex = MUTEX_INITIALIZER,
-    .next = &main_arena
-  };
+{
+  .mutex = MUTEX_INITIALIZER,
+  .next = &main_arena
+};
 
 /* There is only one instance of the malloc parameters.  */
 
 static struct malloc_par mp_ =
-  {
-    .top_pad        = DEFAULT_TOP_PAD,
-    .n_mmaps_max    = DEFAULT_MMAP_MAX,
-    .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
-    .trim_threshold = DEFAULT_TRIM_THRESHOLD,
-# define NARENAS_FROM_NCORES(n) ((n) * (sizeof(long) == 4 ? 2 : 8))
-    .arena_test     = NARENAS_FROM_NCORES (1)
-  };
+{
+  .top_pad = DEFAULT_TOP_PAD,
+  .n_mmaps_max = DEFAULT_MMAP_MAX,
+  .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
+  .trim_threshold = DEFAULT_TRIM_THRESHOLD,
+#define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
+  .arena_test = NARENAS_FROM_NCORES (1)
+};
 
 
 /*  Non public mallopt parameters.  */
@@ -1764,44 +1766,46 @@
 static INTERNAL_SIZE_T global_max_fast;
 
 /*
-  Initialize a malloc_state struct.
-
-  This is called only from within malloc_consolidate, which needs
-  be called in the same contexts anyway.  It is never called directly
-  outside of malloc_consolidate because some optimizing compilers try
-  to inline it at all call points, which turns out not to be an
-  optimization at all. (Inlining it in malloc_consolidate is fine though.)
-*/
-
-static void malloc_init_state(mstate av)
+   Initialize a malloc_state struct.
+
+   This is called only from within malloc_consolidate, which needs
+   be called in the same contexts anyway.  It is never called directly
+   outside of malloc_consolidate because some optimizing compilers try
+   to inline it at all call points, which turns out not to be an
+   optimization at all. (Inlining it in malloc_consolidate is fine though.)
+ */
+
+static void
+malloc_init_state (mstate av)
 {
-  int     i;
+  int i;
   mbinptr bin;
 
   /* Establish circular links for normal bins */
-  for (i = 1; i < NBINS; ++i) {
-    bin = bin_at(av,i);
-    bin->fd = bin->bk = bin;
-  }
+  for (i = 1; i < NBINS; ++i)
+    {
+      bin = bin_at (av, i);
+      bin->fd = bin->bk = bin;
+    }
 
 #if MORECORE_CONTIGUOUS
   if (av != &main_arena)
 #endif
-    set_noncontiguous(av);
+  set_noncontiguous (av);
   if (av == &main_arena)
-    set_max_fast(DEFAULT_MXFAST);
+    set_max_fast (DEFAULT_MXFAST);
   av->flags |= FASTCHUNKS_BIT;
 
-  av->top            = initial_top(av);
+  av->top = initial_top (av);
 }
 
 /*
    Other internal utilities operating on mstates
-*/
-
-static void*  sysmalloc(INTERNAL_SIZE_T, mstate);
-static int      systrim(size_t, mstate);
-static void     malloc_consolidate(mstate);
+ */
+
+static void *sysmalloc (INTERNAL_SIZE_T, mstate);
+static int      systrim (size_t, mstate);
+static void     malloc_consolidate (mstate);
 
 
 /* -------------- Early definitions for debugging hooks ---------------- */
@@ -1815,31 +1819,31 @@
 #endif
 
 /* Forward declarations.  */
-static void* malloc_hook_ini (size_t sz,
-			      const void *caller) __THROW;
-static void* realloc_hook_ini (void* ptr, size_t sz,
-			       const void *caller) __THROW;
-static void* memalign_hook_ini (size_t alignment, size_t sz,
-				const void *caller) __THROW;
+static void *malloc_hook_ini (size_t sz,
+                              const void *caller) __THROW;
+static void *realloc_hook_ini (void *ptr, size_t sz,
+                               const void *caller) __THROW;
+static void *memalign_hook_ini (size_t alignment, size_t sz,
+                                const void *caller) __THROW;
 
 void weak_variable (*__malloc_initialize_hook) (void) = NULL;
 void weak_variable (*__free_hook) (void *__ptr,
-				   const void *) = NULL;
+                                   const void *) = NULL;
 void *weak_variable (*__malloc_hook)
-     (size_t __size, const void *) = malloc_hook_ini;
+  (size_t __size, const void *) = malloc_hook_ini;
 void *weak_variable (*__realloc_hook)
-     (void *__ptr, size_t __size, const void *)
-     = realloc_hook_ini;
+  (void *__ptr, size_t __size, const void *)
+  = realloc_hook_ini;
 void *weak_variable (*__memalign_hook)
-     (size_t __alignment, size_t __size, const void *)
-     = memalign_hook_ini;
+  (size_t __alignment, size_t __size, const void *)
+  = memalign_hook_ini;
 void weak_variable (*__after_morecore_hook) (void) = NULL;
 
 
 /* ---------------- Error behavior ------------------------------------ */
 
 #ifndef DEFAULT_CHECK_ACTION
-#define DEFAULT_CHECK_ACTION 3
+# define DEFAULT_CHECK_ACTION 3
 #endif
 
 static int check_action = DEFAULT_CHECK_ACTION;
@@ -1871,207 +1875,220 @@
 #include "arena.c"
 
 /*
-  Debugging support
-
-  These routines make a number of assertions about the states
-  of data structures that should be true at all times. If any
-  are not true, it's very likely that a user program has somehow
-  trashed memory. (It's also possible that there is a coding error
-  in malloc. In which case, please report it!)
-*/
-
-#if ! MALLOC_DEBUG
-
-#define check_chunk(A,P)
-#define check_free_chunk(A,P)
-#define check_inuse_chunk(A,P)
-#define check_remalloced_chunk(A,P,N)
-#define check_malloced_chunk(A,P,N)
-#define check_malloc_state(A)
+   Debugging support
+
+   These routines make a number of assertions about the states
+   of data structures that should be true at all times. If any
+   are not true, it's very likely that a user program has somehow
+   trashed memory. (It's also possible that there is a coding error
+   in malloc. In which case, please report it!)
+ */
+
+#if !MALLOC_DEBUG
+
+# define check_chunk(A, P)
+# define check_free_chunk(A, P)
+# define check_inuse_chunk(A, P)
+# define check_remalloced_chunk(A, P, N)
+# define check_malloced_chunk(A, P, N)
+# define check_malloc_state(A)
 
 #else
 
-#define check_chunk(A,P)              do_check_chunk(A,P)
-#define check_free_chunk(A,P)         do_check_free_chunk(A,P)
-#define check_inuse_chunk(A,P)        do_check_inuse_chunk(A,P)
-#define check_remalloced_chunk(A,P,N) do_check_remalloced_chunk(A,P,N)
-#define check_malloced_chunk(A,P,N)   do_check_malloced_chunk(A,P,N)
-#define check_malloc_state(A)         do_check_malloc_state(A)
-
-/*
-  Properties of all chunks
-*/
-
-static void do_check_chunk(mstate av, mchunkptr p)
+# define check_chunk(A, P)              do_check_chunk (A, P)
+# define check_free_chunk(A, P)         do_check_free_chunk (A, P)
+# define check_inuse_chunk(A, P)        do_check_inuse_chunk (A, P)
+# define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
+# define check_malloced_chunk(A, P, N)   do_check_malloced_chunk (A, P, N)
+# define check_malloc_state(A)         do_check_malloc_state (A)
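
(Aside: the check_* macros above compile to nothing unless MALLOC_DEBUG is set, so the invariant checks cost nothing in normal builds while debug builds forward to the do_check_* routines. A standalone sketch of that compile-time switch; EXAMPLE_DEBUG, check_value and do_check_value are illustrative names.)

  #include <stdio.h>
  #include <assert.h>

  #define EXAMPLE_DEBUG 1        /* set to 0 to compile the checks away */

  #if !EXAMPLE_DEBUG
  # define check_value(v)
  #else
  # define check_value(v) do_check_value (v)

  static void
  do_check_value (int v)
  {
    /* Invariants that should hold at all times.  */
    assert (v >= 0);
    assert (v < 100);
  }
  #endif

  int
  main (void)
  {
    int v = 42;
    check_value (v);             /* expands to nothing unless EXAMPLE_DEBUG */
    printf ("%d\n", v);
    return 0;
  }
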
+
+/*
+   Properties of all chunks
+ */
+
+static void
+do_check_chunk (mstate av, mchunkptr p)
 {
-  unsigned long sz = chunksize(p);
+  unsigned long sz = chunksize (p);
   /* min and max possible addresses assuming contiguous allocation */
-  char* max_address = (char*)(av->top) + chunksize(av->top);
-  char* min_address = max_address - av->system_mem;
-
-  if (!chunk_is_mmapped(p)) {
-
-    /* Has legal address ... */
-    if (p != av->top) {
-      if (contiguous(av)) {
-	assert(((char*)p) >= min_address);
-	assert(((char*)p + sz) <= ((char*)(av->top)));
-      }
-    }
-    else {
-      /* top size is always at least MINSIZE */
-      assert((unsigned long)(sz) >= MINSIZE);
-      /* top predecessor always marked inuse */
-      assert(prev_inuse(p));
-    }
-
-  }
-  else {
-    /* address is outside main heap  */
-    if (contiguous(av) && av->top != initial_top(av)) {
-      assert(((char*)p) < min_address || ((char*)p) >= max_address);
-    }
-    /* chunk is page-aligned */
-    assert(((p->prev_size + sz) & (GLRO(dl_pagesize)-1)) == 0);
-    /* mem is aligned */
-    assert(aligned_OK(chunk2mem(p)));
-  }
+  char *max_address = (char *) (av->top) + chunksize (av->top);
+  char *min_address = max_address - av->system_mem;
+
+  if (!chunk_is_mmapped (p))
+    {
+      /* Has legal address ... */
+      if (p != av->top)
+        {
+          if (contiguous (av))
+            {
+              assert (((char *) p) >= min_address);
+              assert (((char *) p + sz) <= ((char *) (av->top)));
+            }
+        }
+      else
+        {
+          /* top size is always at least MINSIZE */
+          assert ((unsigned long) (sz) >= MINSIZE);
+          /* top predecessor always marked inuse */
+          assert (prev_inuse (p));
+        }
+    }
+  else
+    {
+      /* address is outside main heap  */
+      if (contiguous (av) && av->top != initial_top (av))
+        {
+          assert (((char *) p) < min_address || ((char *) p) >= max_address);
+        }
+      /* chunk is page-aligned */
+      assert (((p->prev_size + sz) & (GLRO (dl_pagesize) - 1)) == 0);
+      /* mem is aligned */
+      assert (aligned_OK (chunk2mem (p)));
+    }
 }
 
 /*
-  Properties of free chunks
-*/
-
-static void do_check_free_chunk(mstate av, mchunkptr p)
+   Properties of free chunks
+ */
+
+static void
+do_check_free_chunk (mstate av, mchunkptr p)

[... 8003 lines stripped ...]