[commits] r3119 - in /fsf/trunk/libc: ./ elf/ nptl/ nptl/sysdeps/unix/sysv/linux/ nptl/sysdeps/unix/sysv/linux/alpha/ nptl/sysdeps/uni...



Author: eglibc
Date: Sun Aug 12 00:02:41 2007
New Revision: 3119

Log:
Import glibc-mainline for 2007-08-12

Modified:
    fsf/trunk/libc/ChangeLog
    fsf/trunk/libc/elf/dl-misc.c
    fsf/trunk/libc/nptl/ChangeLog
    fsf/trunk/libc/nptl/pthreadP.h
    fsf/trunk/libc/nptl/pthread_cond_broadcast.c
    fsf/trunk/libc/nptl/pthread_cond_destroy.c
    fsf/trunk/libc/nptl/pthread_cond_signal.c
    fsf/trunk/libc/nptl/pthread_cond_timedwait.c
    fsf/trunk/libc/nptl/pthread_cond_wait.c
    fsf/trunk/libc/nptl/pthread_mutex_init.c
    fsf/trunk/libc/nptl/pthread_mutex_lock.c
    fsf/trunk/libc/nptl/pthread_mutex_setprioceiling.c
    fsf/trunk/libc/nptl/pthread_mutex_timedlock.c
    fsf/trunk/libc/nptl/pthread_mutex_trylock.c
    fsf/trunk/libc/nptl/pthread_mutex_unlock.c
    fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h
    fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h
    fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
    fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/pthread-pi-defines.sym
    fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c
    fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h
    fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
    fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
    fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
    fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
    fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
    fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S

Modified: fsf/trunk/libc/ChangeLog
==============================================================================
--- fsf/trunk/libc/ChangeLog (original)
+++ fsf/trunk/libc/ChangeLog Sun Aug 12 00:02:41 2007
@@ -1,8 +1,13 @@
+2007-08-11  Ulrich Drepper  <drepper@xxxxxxxxxx>
+
+	* elf/dl-misc.c (_dl_sysdep_read_whole_file): We really don't need
+	an atime update for the files we read.
+
 2007-08-10  Ulrich Drepper  <drepper@xxxxxxxxxx>
 
 	* shadow/lckpwdf.c (__lckpwdf): Use O_CLOEXEC if possible.
 
-	* nscd/connections.c: Use O_CLOEXEC is possible.  Use mkostemp
+	* nscd/connections.c: Use O_CLOEXEC if possible.  Use mkostemp
 	instead of mkstemp.
 
 	* misc/Makefile (routines): Add mkostemp and mkostemp64.

Modified: fsf/trunk/libc/elf/dl-misc.c
==============================================================================
--- fsf/trunk/libc/elf/dl-misc.c (original)
+++ fsf/trunk/libc/elf/dl-misc.c Sun Aug 12 00:02:41 2007
@@ -1,5 +1,5 @@
 /* Miscellaneous support functions for dynamic linker
-   Copyright (C) 1997-2002, 2003, 2004, 2006 Free Software Foundation, Inc.
+   Copyright (C) 1997-2004, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -55,7 +55,11 @@
 {
   void *result = MAP_FAILED;
   struct stat64 st;
-  int fd = __open (file, O_RDONLY);
+  int oflags = O_RDONLY;
+#ifdef O_NOATIME
+  oflags |= O_NOATIME;
+#endif
+  int fd = __open (file, oflags);
   if (fd >= 0)
     {
       if (__fxstat64 (_STAT_VER, fd, &st) >= 0)
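
The hunk above (in _dl_sysdep_read_whole_file) simply ORs O_NOATIME into the
open flags when the header defines it, so reading shared objects does not
force an access-time update.  The same pattern in a self-contained form; the
function name, the fallback open and the mmap wrapper below are illustrative,
not part of the commit:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <stddef.h>
#include <unistd.h>

/* Map FILE read-only without updating its atime when the kernel offers
   O_NOATIME; fall back to a plain O_RDONLY open, since O_NOATIME may be
   refused (EPERM) for files the caller does not own.  */
static void *
map_whole_file (const char *file, size_t *sizep)
{
  int oflags = O_RDONLY;
#ifdef O_NOATIME
  oflags |= O_NOATIME;
#endif
  int fd = open (file, oflags);
#ifdef O_NOATIME
  if (fd < 0)
    fd = open (file, O_RDONLY);
#endif
  if (fd < 0)
    return NULL;

  struct stat st;
  void *result = NULL;
  if (fstat (fd, &st) == 0 && st.st_size > 0)
    {
      result = mmap (NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
      if (result == MAP_FAILED)
        result = NULL;
      else
        *sizep = st.st_size;
    }
  close (fd);
  return result;
}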

Modified: fsf/trunk/libc/nptl/ChangeLog
==============================================================================
--- fsf/trunk/libc/nptl/ChangeLog (original)
+++ fsf/trunk/libc/nptl/ChangeLog Sun Aug 12 00:02:41 2007
@@ -1,3 +1,102 @@
+2007-08-11  Ulrich Drepper  <drepper@xxxxxxxxxx>
+
+	* pthreadP.h (PTHREAD_ROBUST_MUTEX_PSHARED): Define.
+	* pthread_mutex_lock.c: Use it instead of PTHREAD_MUTEX_PSHARED when
+	dealing with robust mutexes.
+	* pthread_mutex_timedlock.c: Likewise.
+	* pthread_mutex_trylock.c: Likewise.
+	* pthread_mutex_unlock.c: Likewise.
+	* sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c: Likewise.
+
+2007-08-06  Jakub Jelinek  <jakub@xxxxxxxxxx>
+
+	* pthreadP.h (PTHREAD_MUTEX_PSHARED_BIT): Define.
+	(PTHREAD_MUTEX_TYPE): Mask __kind with 127.
+	(PTHREAD_MUTEX_PSHARED): Define.
+	* pthread_mutex_init.c (__pthread_mutex_init): Set
+	PTHREAD_MUTEX_PSHARED_BIT for pshared or robust
+	mutexes.
+	* pthread_mutex_lock.c (LLL_MUTEX_LOCK): Take mutex as argument
+	instead of its __data.__lock field, pass PTHREAD_MUTEX_PSHARED
+	as second argument to lll_lock.
+	(LLL_MUTEX_TRYLOCK): Take mutex as argument
+	instead of its __data.__lock field.
+	(LLL_ROBUST_MUTEX_LOCK): Take mutex as argument instead of its
+	__data.__lock field, pass PTHREAD_MUTEX_PSHARED as second argument
+	to lll_robust_lock.
+	(__pthread_mutex_lock): Update LLL_MUTEX_LOCK, LLL_MUTEX_TRYLOCK,
+	LLL_ROBUST_MUTEX_LOCK users, use PTHREAD_MUTEX_TYPE (mutex)
+	instead of mutex->__data.__kind directly, pass
+	PTHREAD_MUTEX_PSHARED (mutex) to lll_unlock and lll_futex_wait.
+	* pthread_mutex_trylock.c (__pthread_mutex_trylock): Use
+	PTHREAD_MUTEX_TYPE (mutex) instead of mutex->__data.__kind
+	directly, pass PTHREAD_MUTEX_PSHARED (mutex) to lll_unlock.
+	(pthread_mutex_timedlock): Pass PTHREAD_MUTEX_PSHARED (mutex)
+	to lll_timedlock, lll_robust_timedlock, lll_unlock and
+	lll_futex_timed_wait.  Use PTHREAD_MUTEX_TYPE (mutex) instead
+	of mutex->__data.__kind directly.
+	* pthread_mutex_timedlock.c (pthread_mutex_timedlock): Pass
+	PTHREAD_MUTEX_PSHARED (mutex) to lll_timedlock,
+	lll_robust_timedlock, lll_unlock and lll_futex_timed_wait.  Use
+	PTHREAD_MUTEX_TYPE (mutex) instead of mutex->__data.__kind directly.
+	* pthread_mutex_unlock.c (__pthread_mutex_unlock_usercnt): Pass
+	PTHREAD_MUTEX_PSHARED (mutex) to lll_unlock, lll_robust_unlock
+	and lll_futex_wake.
+	* pthread_mutex_setprioceiling.c (pthread_mutex_setprioceiling): Pass
+	PTHREAD_MUTEX_PSHARED (mutex) to lll_futex_wait and lll_futex_wake.
+	Use PTHREAD_MUTEX_TYPE (mutex) instead of mutex->__data.__kind
+	directly.
+	* sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c (LLL_MUTEX_LOCK):
+	Take mutex as argument instead of its __data.__lock field, pass
+	PTHREAD_MUTEX_PSHARED as second argument to lll_cond_lock.
+	(LLL_MUTEX_TRYLOCK): Take mutex as argument instead of its
+	__data.__lock field.
+	(LLL_ROBUST_MUTEX_LOCK): Take mutex as argument instead of its
+	__data.__lock field, pass PTHREAD_MUTEX_PSHARED as second argument
+	to lll_robust_cond_lock.
+	* pthread_cond_broadcast.c (__pthread_cond_broadcast): Add pshared
+	variable, pass it to lll_lock, lll_unlock, lll_futex_requeue and
+	lll_futex_wake.  Don't use lll_futex_requeue if dependent mutex
+	has PTHREAD_MUTEX_PSHARED_BIT bit set in its __data.__kind.
+	* pthread_cond_destroy.c (__pthread_cond_destroy): Add pshared
+	variable, pass it to lll_lock, lll_unlock, lll_futex_wake and
+	lll_futex_wait.
+	* pthread_cond_signal.c (__pthread_cond_signal): Add pshared
+	variable, pass it to lll_lock, lll_unlock, lll_futex_wake_unlock and
+	lll_futex_wake.
+	* pthread_cond_timedwait.c (__pthread_cond_wait): Add
+	pshared variable, pass it to lll_lock, lll_unlock,
+	lll_futex_timedwait and lll_futex_wake.
+	* pthread_cond_wait.c (__condvar_cleanup, __pthread_cond_wait): Add
+	pshared variable, pass it to lll_lock, lll_unlock, lll_futex_wait
+	and lll_futex_wake.
+	* sysdeps/unix/sysv/linux/alpha/lowlevellock.h (lll_futex_requeue,
+	lll_futex_wake_unlock): Add private argument, use __lll_private_flag
+	macro.
+	* sysdeps/unix/sysv/linux/ia64/lowlevellock.h (lll_futex_requeue,
+	lll_futex_wake_unlock): Likewise.
+	* sysdeps/unix/sysv/linux/powerpc/lowlevellock.h (lll_futex_requeue):
+	Likewise.
+	* sysdeps/unix/sysv/linux/sparc/lowlevellock.h (lll_futex_requeue,
+	lll_futex_wake_unlock): Likewise.
+	* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h (lll_futex_requeue):
+	Likewise.
+	* sysdeps/unix/sysv/linux/s390/lowlevellock.h (lll_futex_requeue,
+	lll_futex_wake_unlock): Likewise.
+	(lll_futex_wake): Fix a typo.
+	* sysdeps/unix/sysv/linux/pthread-pi-defines.sym (PS_BIT): Add.
+	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
+	(__pthread_cond_broadcast): Pass LLL_PRIVATE to lll_* and or
+	FUTEX_PRIVATE_FLAG into SYS_futex op if cv is process private.
+	Don't use FUTEX_CMP_REQUEUE if dep_mutex is not process private.
+	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
+	(__pthread_cond_signal): Pass LLL_PRIVATE to lll_* and or
+	FUTEX_PRIVATE_FLAG into SYS_futex op if cv is process private.
+	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
+	(__pthread_cond_timedwait): Likewise.
+	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S:
+	(__condvar_cleanup, __pthread_cond_wait): Likewise.
+
 2007-08-05  Jakub Jelinek  <jakub@xxxxxxxxxx>
 
 	* sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep-cancel.h (PSEUDO):
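
The common thread in the nptl entries above is that the low-level lock and
futex helpers now carry an explicit private/shared selector and, for
process-private objects, OR FUTEX_PRIVATE_FLAG into the futex operation so
the kernel can use its cheaper private-futex key.  A hedged sketch of that
core idea using the raw syscall rather than the real lll_* macros (the
helper name is made up):

#include <linux/futex.h>	/* FUTEX_WAKE, FUTEX_PRIVATE_FLAG */
#include <sys/syscall.h>
#include <stddef.h>
#include <unistd.h>

/* Wake up to NR_WAKE waiters on *FUTEXP.  IS_PRIVATE says whether the
   futex word is only ever used inside this process.  */
static long
futex_wake (int *futexp, int nr_wake, int is_private)
{
  int op = FUTEX_WAKE;
  if (is_private)
    op |= FUTEX_PRIVATE_FLAG;
  return syscall (SYS_futex, futexp, op, nr_wake, NULL, NULL, 0);
}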

Modified: fsf/trunk/libc/nptl/pthreadP.h
==============================================================================
--- fsf/trunk/libc/nptl/pthreadP.h (original)
+++ fsf/trunk/libc/nptl/pthreadP.h Sun Aug 12 00:02:41 2007
@@ -96,9 +96,22 @@
   PTHREAD_MUTEX_PP_ADAPTIVE_NP
   = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_ADAPTIVE_NP
 };
+#define PTHREAD_MUTEX_PSHARED_BIT 128
 
 #define PTHREAD_MUTEX_TYPE(m) \
-  ((m)->__data.__kind)
+  ((m)->__data.__kind & 127)
+
+#if LLL_PRIVATE == 0 && LLL_SHARED == 128
+# define PTHREAD_MUTEX_PSHARED(m) \
+  ((m)->__data.__kind & 128)
+#else
+# define PTHREAD_MUTEX_PSHARED(m) \
+  (((m)->__data.__kind & 128) ? LLL_SHARED : LLL_PRIVATE)
+#endif
+
+/* The kernel when waking robust mutexes on exit never uses
+   FUTEX_PRIVATE_FLAG FUTEX_WAKE.  */
+#define PTHREAD_ROBUST_MUTEX_PSHARED(m) LLL_SHARED
 
 /* Ceiling in __data.__lock.  __data.__lock is signed, so don't
    use the MSB bit in there, but in the mask also include that bit,
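
The effect of the pthreadP.h hunk: the low seven bits of __data.__kind keep
holding the mutex type, while bit 128 (PTHREAD_MUTEX_PSHARED_BIT) marks a
process-shared or robust mutex so the futex calls can choose between private
and shared operations.  A small self-contained illustration of that bit
split (the macro bodies mirror the patch; the test values are only examples):

#include <assert.h>

#define PTHREAD_MUTEX_PSHARED_BIT 128
#define MUTEX_TYPE(kind)       ((kind) & 127)
#define MUTEX_IS_PSHARED(kind) (((kind) & PTHREAD_MUTEX_PSHARED_BIT) != 0)

int
main (void)
{
  /* E.g. a recursive mutex (type 1) with the new pshared bit set.  */
  int kind = 1 | PTHREAD_MUTEX_PSHARED_BIT;
  assert (MUTEX_TYPE (kind) == 1);
  assert (MUTEX_IS_PSHARED (kind));
  return 0;
}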

Modified: fsf/trunk/libc/nptl/pthread_cond_broadcast.c
==============================================================================
--- fsf/trunk/libc/nptl/pthread_cond_broadcast.c (original)
+++ fsf/trunk/libc/nptl/pthread_cond_broadcast.c Sun Aug 12 00:02:41 2007
@@ -32,8 +32,10 @@
 __pthread_cond_broadcast (cond)
      pthread_cond_t *cond;
 {
+  int pshared = (cond->__data.__mutex == (void *) ~0l)
+		? LLL_SHARED : LLL_PRIVATE;
   /* Make sure we are alone.  */
-  lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
+  lll_lock (cond->__data.__lock, pshared);
 
   /* Are there any waiters to be woken?  */
   if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
@@ -47,7 +49,7 @@
       ++cond->__data.__broadcast_seq;
 
       /* We are done.  */
-      lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
+      lll_unlock (cond->__data.__lock, pshared);
 
       /* Do not use requeue for pshared condvars.  */
       if (cond->__data.__mutex == (void *) ~0l)
@@ -57,21 +59,22 @@
       pthread_mutex_t *mut = (pthread_mutex_t *) cond->__data.__mutex;
 
       /* XXX: Kernel so far doesn't support requeue to PI futex.  */
-      if (__builtin_expect (mut->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP,
-			    0))
+      /* XXX: Kernel so far can only requeue to the same type of futex,
+	 in this case private (we don't requeue for pshared condvars).  */
+      if (__builtin_expect (mut->__data.__kind
+			    & (PTHREAD_MUTEX_PRIO_INHERIT_NP
+			       | PTHREAD_MUTEX_PSHARED_BIT), 0))
 	goto wake_all;
 
       /* lll_futex_requeue returns 0 for success and non-zero
 	 for errors.  */
       if (__builtin_expect (lll_futex_requeue (&cond->__data.__futex, 1,
 					       INT_MAX, &mut->__data.__lock,
-					       futex_val), 0))
+					       futex_val, LLL_PRIVATE), 0))
 	{
 	  /* The requeue functionality is not available.  */
 	wake_all:
-	  lll_futex_wake (&cond->__data.__futex, INT_MAX,
-			  // XYZ check mutex flag
-			  LLL_SHARED);
+	  lll_futex_wake (&cond->__data.__futex, INT_MAX, pshared);
 	}
 
       /* That's all.  */
@@ -79,7 +82,7 @@
     }
 
   /* We are done.  */
-  lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
+  lll_unlock (cond->__data.__lock, pshared);
 
   return 0;
 }
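
The new pshared variable above is derived from the condvar itself: a
process-shared condvar stores the sentinel (void *) ~0l in its
__data.__mutex (dep_mutex) field, so comparing against that sentinel selects
the shared or private futex flavour throughout the condvar code.  A sketch
with illustrative stand-in names (the LLL_* values follow the fast path the
pthreadP.h hunk tests for):

#define LLL_PRIVATE 0
#define LLL_SHARED  128

struct cond_sketch
{
  void *dep_mutex;	/* (void *) ~0l for process-shared condvars.  */
};

static inline int
cond_pshared (const struct cond_sketch *cond)
{
  return cond->dep_mutex == (void *) ~0l ? LLL_SHARED : LLL_PRIVATE;
}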

Modified: fsf/trunk/libc/nptl/pthread_cond_destroy.c
==============================================================================
--- fsf/trunk/libc/nptl/pthread_cond_destroy.c (original)
+++ fsf/trunk/libc/nptl/pthread_cond_destroy.c Sun Aug 12 00:02:41 2007
@@ -26,14 +26,17 @@
 __pthread_cond_destroy (cond)
      pthread_cond_t *cond;
 {
+  int pshared = (cond->__data.__mutex == (void *) ~0l)
+		? LLL_SHARED : LLL_PRIVATE;
+
   /* Make sure we are alone.  */
-  lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
+  lll_lock (cond->__data.__lock, pshared);
 
   if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
     {
       /* If there are still some waiters which have not been
 	 woken up, this is an application bug.  */
-      lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
+      lll_unlock (cond->__data.__lock, pshared);
       return EBUSY;
     }
 
@@ -60,19 +63,16 @@
 	{
 	  pthread_mutex_t *mut = (pthread_mutex_t *) cond->__data.__mutex;
 	  lll_futex_wake (&mut->__data.__lock, INT_MAX,
-			  // XYZ check mutex flag
-			  LLL_SHARED);
+			  PTHREAD_MUTEX_PSHARED (mut));
 	}
 
       do
 	{
-	  lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
+	  lll_unlock (cond->__data.__lock, pshared);
 
-	  lll_futex_wait (&cond->__data.__nwaiters, nwaiters,
-			  // XYZ check mutex flag
-			  LLL_SHARED);
+	  lll_futex_wait (&cond->__data.__nwaiters, nwaiters, pshared);
 
-	  lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
+	  lll_lock (cond->__data.__lock, pshared);
 
 	  nwaiters = cond->__data.__nwaiters;
 	}

Modified: fsf/trunk/libc/nptl/pthread_cond_signal.c
==============================================================================
--- fsf/trunk/libc/nptl/pthread_cond_signal.c (original)
+++ fsf/trunk/libc/nptl/pthread_cond_signal.c Sun Aug 12 00:02:41 2007
@@ -32,8 +32,11 @@
 __pthread_cond_signal (cond)
      pthread_cond_t *cond;
 {
+  int pshared = (cond->__data.__mutex == (void *) ~0l)
+		? LLL_SHARED : LLL_PRIVATE;
+
   /* Make sure we are alone.  */
-  lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
+  lll_lock (cond->__data.__lock, pshared);
 
   /* Are there any waiters to be woken?  */
   if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
@@ -45,18 +48,14 @@
       /* Wake one.  */
       if (! __builtin_expect (lll_futex_wake_unlock (&cond->__data.__futex, 1,
 						     1, &cond->__data.__lock,
-						     // XYZ check mutex flag
-						     LLL_SHARED),
-						     0))
+						     pshared), 0))
 	return 0;
 
-      lll_futex_wake (&cond->__data.__futex, 1,
-		      // XYZ check mutex flag
-		      LLL_SHARED);
+      lll_futex_wake (&cond->__data.__futex, 1, pshared);
     }
 
   /* We are done.  */
-  lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
+  lll_unlock (cond->__data.__lock, pshared);
 
   return 0;
 }

Modified: fsf/trunk/libc/nptl/pthread_cond_timedwait.c
==============================================================================
--- fsf/trunk/libc/nptl/pthread_cond_timedwait.c (original)
+++ fsf/trunk/libc/nptl/pthread_cond_timedwait.c Sun Aug 12 00:02:41 2007
@@ -53,14 +53,17 @@
   if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
     return EINVAL;
 
+  int pshared = (cond->__data.__mutex == (void *) ~0l)
+		? LLL_SHARED : LLL_PRIVATE;
+
   /* Make sure we are along.  */
-  lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
+  lll_lock (cond->__data.__lock, pshared);
 
   /* Now we can release the mutex.  */
   int err = __pthread_mutex_unlock_usercnt (mutex, 0);
   if (err)
     {
-      lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
+      lll_unlock (cond->__data.__lock, pshared);
       return err;
     }
 
@@ -146,22 +149,20 @@
       unsigned int futex_val = cond->__data.__futex;
 
       /* Prepare to wait.  Release the condvar futex.  */
-      lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
+      lll_unlock (cond->__data.__lock, pshared);
 
       /* Enable asynchronous cancellation.  Required by the standard.  */
       cbuffer.oldtype = __pthread_enable_asynccancel ();
 
       /* Wait until woken by signal or broadcast.  */
       err = lll_futex_timed_wait (&cond->__data.__futex,
-				  futex_val, &rt,
-				  // XYZ check mutex flag
-				  LLL_SHARED);
+				  futex_val, &rt, pshared);
 
       /* Disable asynchronous cancellation.  */
       __pthread_disable_asynccancel (cbuffer.oldtype);
 
       /* We are going to look at shared data again, so get the lock.  */
-      lll_lock(cond->__data.__lock, /* XYZ */ LLL_SHARED);
+      lll_lock (cond->__data.__lock, pshared);
 
       /* If a broadcast happened, we are done.  */
       if (cbuffer.bc_seq != cond->__data.__broadcast_seq)
@@ -198,12 +199,10 @@
      and it can be successfully destroyed.  */
   if (cond->__data.__total_seq == -1ULL
       && cond->__data.__nwaiters < (1 << COND_NWAITERS_SHIFT))
-    lll_futex_wake (&cond->__data.__nwaiters, 1,
-		    // XYZ check mutex flag
-		    LLL_SHARED);
+    lll_futex_wake (&cond->__data.__nwaiters, 1, pshared);
 
   /* We are done with the condvar.  */
-  lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
+  lll_unlock (cond->__data.__lock, pshared);
 
   /* The cancellation handling is back to normal, remove the handler.  */
   __pthread_cleanup_pop (&buffer, 0);
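
pthread_cond_timedwait now threads the same pshared selector down to
lll_futex_timed_wait.  Underneath, that is a FUTEX_WAIT (optionally with
FUTEX_PRIVATE_FLAG) on the condvar futex with a relative timeout; a rough
user-space equivalent using the raw syscall, not the glibc macro itself:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <stddef.h>
#include <unistd.h>
#include <time.h>
#include <errno.h>

/* Block while *FUTEXP still equals EXPECTED, for at most the relative
   timeout REL.  OP_FLAGS is 0 or FUTEX_PRIVATE_FLAG.  Returns 0 on wakeup,
   otherwise an errno value such as ETIMEDOUT or EWOULDBLOCK.  */
static int
futex_timed_wait (int *futexp, int expected, const struct timespec *rel,
                  int op_flags)
{
  long r = syscall (SYS_futex, futexp, FUTEX_WAIT | op_flags,
                    expected, rel, NULL, 0);
  return r == 0 ? 0 : errno;
}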

Modified: fsf/trunk/libc/nptl/pthread_cond_wait.c
==============================================================================
--- fsf/trunk/libc/nptl/pthread_cond_wait.c (original)
+++ fsf/trunk/libc/nptl/pthread_cond_wait.c Sun Aug 12 00:02:41 2007
@@ -43,9 +43,11 @@
   struct _condvar_cleanup_buffer *cbuffer =
     (struct _condvar_cleanup_buffer *) arg;
   unsigned int destroying;
+  int pshared = (cbuffer->cond->__data.__mutex == (void *) ~0l)
+  		? LLL_SHARED : LLL_PRIVATE;
 
   /* We are going to modify shared data.  */
-  lll_lock (cbuffer->cond->__data.__lock, /* XYZ */ LLL_SHARED);
+  lll_lock (cbuffer->cond->__data.__lock, pshared);
 
   if (cbuffer->bc_seq == cbuffer->cond->__data.__broadcast_seq)
     {
@@ -71,20 +73,16 @@
   if (cbuffer->cond->__data.__total_seq == -1ULL
       && cbuffer->cond->__data.__nwaiters < (1 << COND_NWAITERS_SHIFT))
     {
-      lll_futex_wake (&cbuffer->cond->__data.__nwaiters, 1,
-		      // XYZ check mutex flag
-		      LLL_SHARED);
+      lll_futex_wake (&cbuffer->cond->__data.__nwaiters, 1, pshared);
       destroying = 1;
     }
 
   /* We are done.  */
-  lll_unlock (cbuffer->cond->__data.__lock, /* XYZ */ LLL_SHARED);
+  lll_unlock (cbuffer->cond->__data.__lock, pshared);
 
   /* Wake everybody to make sure no condvar signal gets lost.  */
   if (! destroying)
-    lll_futex_wake (&cbuffer->cond->__data.__futex, INT_MAX,
-		    // XYZ check mutex flag
-		    LLL_SHARED);
+    lll_futex_wake (&cbuffer->cond->__data.__futex, INT_MAX, pshared);
 
   /* Get the mutex before returning unless asynchronous cancellation
      is in effect.  */
@@ -100,15 +98,17 @@
   struct _pthread_cleanup_buffer buffer;
   struct _condvar_cleanup_buffer cbuffer;
   int err;
+  int pshared = (cond->__data.__mutex == (void *) ~0l)
+  		? LLL_SHARED : LLL_PRIVATE;
 
   /* Make sure we are along.  */
-  lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
+  lll_lock (cond->__data.__lock, pshared);
 
   /* Now we can release the mutex.  */
   err = __pthread_mutex_unlock_usercnt (mutex, 0);
   if (__builtin_expect (err, 0))
     {
-      lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
+      lll_unlock (cond->__data.__lock, pshared);
       return err;
     }
 
@@ -144,21 +144,19 @@
       unsigned int futex_val = cond->__data.__futex;
 
       /* Prepare to wait.  Release the condvar futex.  */
-      lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
+      lll_unlock (cond->__data.__lock, pshared);
 
       /* Enable asynchronous cancellation.  Required by the standard.  */
       cbuffer.oldtype = __pthread_enable_asynccancel ();
 
       /* Wait until woken by signal or broadcast.  */
-      lll_futex_wait (&cond->__data.__futex, futex_val,
-		      // XYZ check mutex flag
-		      LLL_SHARED);
+      lll_futex_wait (&cond->__data.__futex, futex_val, pshared);
 
       /* Disable asynchronous cancellation.  */
       __pthread_disable_asynccancel (cbuffer.oldtype);
 
       /* We are going to look at shared data again, so get the lock.  */
-      lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
+      lll_lock (cond->__data.__lock, pshared);
 
       /* If a broadcast happened, we are done.  */
       if (cbuffer.bc_seq != cond->__data.__broadcast_seq)
@@ -181,12 +179,10 @@
      and it can be successfully destroyed.  */
   if (cond->__data.__total_seq == -1ULL
       && cond->__data.__nwaiters < (1 << COND_NWAITERS_SHIFT))
-    lll_futex_wake (&cond->__data.__nwaiters, 1,
-		    // XYZ check mutex flag
-		    LLL_SHARED);
+    lll_futex_wake (&cond->__data.__nwaiters, 1, pshared);
 
   /* We are done with the condvar.  */
-  lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
+  lll_unlock (cond->__data.__lock, pshared);
 
   /* The cancellation handling is back to normal, remove the handler.  */
   __pthread_cleanup_pop (&buffer, 0);

Modified: fsf/trunk/libc/nptl/pthread_mutex_init.c
==============================================================================
--- fsf/trunk/libc/nptl/pthread_mutex_init.c (original)
+++ fsf/trunk/libc/nptl/pthread_mutex_init.c Sun Aug 12 00:02:41 2007
@@ -1,4 +1,5 @@
-/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
+   Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@xxxxxxxxxx>, 2002.
 
@@ -120,6 +121,12 @@
       break;
     }
 
+  /* The kernel when waking robust mutexes on exit never uses
+     FUTEX_PRIVATE_FLAG FUTEX_WAKE.  */
+  if ((imutexattr->mutexkind & (PTHREAD_MUTEXATTR_FLAG_PSHARED
+				| PTHREAD_MUTEXATTR_FLAG_ROBUST)) != 0)
+    mutex->__data.__kind |= PTHREAD_MUTEX_PSHARED_BIT;
+
   /* Default values: mutex not used yet.  */
   // mutex->__count = 0;	already done by memset
   // mutex->__owner = 0;	already done by memset
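
The pthread_mutex_init.c hunk sets the new PTHREAD_MUTEX_PSHARED_BIT for
both process-shared and robust mutexes: pshared mutexes because other
processes must be able to operate on the futex word, robust mutexes because
the kernel's exit-time robust-list wakeup never uses FUTEX_PRIVATE_FLAG, so
their waiters must use shared futex operations too.  For reference, the two
attribute settings that take this path (using the _np robust interface glibc
exported at the time):

#define _GNU_SOURCE
#include <pthread.h>

static void
init_examples (pthread_mutex_t *pshared_m, pthread_mutex_t *robust_m)
{
  pthread_mutexattr_t a;

  /* Process-shared: gets PTHREAD_MUTEX_PSHARED_BIT.  */
  pthread_mutexattr_init (&a);
  pthread_mutexattr_setpshared (&a, PTHREAD_PROCESS_SHARED);
  pthread_mutex_init (pshared_m, &a);
  pthread_mutexattr_destroy (&a);

  /* Robust: also gets PTHREAD_MUTEX_PSHARED_BIT.  */
  pthread_mutexattr_init (&a);
  pthread_mutexattr_setrobust_np (&a, PTHREAD_MUTEX_ROBUST_NP);
  pthread_mutex_init (robust_m, &a);
  pthread_mutexattr_destroy (&a);
}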

Modified: fsf/trunk/libc/nptl/pthread_mutex_lock.c
==============================================================================
--- fsf/trunk/libc/nptl/pthread_mutex_lock.c (original)
+++ fsf/trunk/libc/nptl/pthread_mutex_lock.c Sun Aug 12 00:02:41 2007
@@ -27,9 +27,13 @@
 
 
 #ifndef LLL_MUTEX_LOCK
-# define LLL_MUTEX_LOCK(mutex) lll_lock (mutex, /* XYZ */ LLL_SHARED)
-# define LLL_MUTEX_TRYLOCK(mutex) lll_trylock (mutex)
-# define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_lock (mutex, id, /* XYZ */ LLL_SHARED)
+# define LLL_MUTEX_LOCK(mutex) \
+  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
+# define LLL_MUTEX_TRYLOCK(mutex) \
+  lll_trylock ((mutex)->__data.__lock)
+# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
+  lll_robust_lock ((mutex)->__data.__lock, id, \
+		   PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
 #endif
 
 
@@ -62,7 +66,7 @@
 	}
 
       /* We have to get the mutex.  */
-      LLL_MUTEX_LOCK (mutex->__data.__lock);
+      LLL_MUTEX_LOCK (mutex);
 
       assert (mutex->__data.__owner == 0);
       mutex->__data.__count = 1;
@@ -79,7 +83,7 @@
     case PTHREAD_MUTEX_TIMED_NP:
     simple:
       /* Normal mutex.  */
-      LLL_MUTEX_LOCK (mutex->__data.__lock);
+      LLL_MUTEX_LOCK (mutex);
       assert (mutex->__data.__owner == 0);
       break;
 
@@ -87,7 +91,7 @@
       if (! __is_smp)
 	goto simple;
 
-      if (LLL_MUTEX_TRYLOCK (mutex->__data.__lock) != 0)
+      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
 	{
 	  int cnt = 0;
 	  int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
@@ -96,7 +100,7 @@
 	    {
 	      if (cnt++ >= max_cnt)
 		{
-		  LLL_MUTEX_LOCK (mutex->__data.__lock);
+		  LLL_MUTEX_LOCK (mutex);
 		  break;
 		}
 
@@ -104,7 +108,7 @@
 	      BUSY_WAIT_NOP;
 #endif
 	    }
-	  while (LLL_MUTEX_TRYLOCK (mutex->__data.__lock) != 0);
+	  while (LLL_MUTEX_TRYLOCK (mutex) != 0);
 
 	  mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
 	}
@@ -166,16 +170,15 @@
 	  /* Check whether we already hold the mutex.  */
 	  if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
 	    {
-	      if (mutex->__data.__kind
-		  == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
+	      int kind = PTHREAD_MUTEX_TYPE (mutex);
+	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
 		{
 		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 				 NULL);
 		  return EDEADLK;
 		}
 
-	      if (mutex->__data.__kind
-		  == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
+	      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
 		{
 		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 				 NULL);
@@ -191,14 +194,15 @@
 		}
 	    }
 
-	  oldval = LLL_ROBUST_MUTEX_LOCK (mutex->__data.__lock, id);
+	  oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);
 
 	  if (__builtin_expect (mutex->__data.__owner
 				== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
 	    {
 	      /* This mutex is now not recoverable.  */
 	      mutex->__data.__count = 0;
-	      lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
+	      lll_unlock (mutex->__data.__lock,
+			  PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
 	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 	      return ENOTRECOVERABLE;
 	    }
@@ -410,8 +414,7 @@
 
 		if (oldval != ceilval)
 		  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
-				  // XYZ check mutex flag
-				  LLL_SHARED);
+				  PTHREAD_MUTEX_PSHARED (mutex));
 	      }
 	    while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
 							ceilval | 2, ceilval)

Modified: fsf/trunk/libc/nptl/pthread_mutex_setprioceiling.c
==============================================================================
--- fsf/trunk/libc/nptl/pthread_mutex_setprioceiling.c (original)
+++ fsf/trunk/libc/nptl/pthread_mutex_setprioceiling.c Sun Aug 12 00:02:41 2007
@@ -47,12 +47,13 @@
 
   /* Check whether we already hold the mutex.  */
   bool locked = false;
+  int kind = PTHREAD_MUTEX_TYPE (mutex);
   if (mutex->__data.__owner == THREAD_GETMEM (THREAD_SELF, tid))
     {
-      if (mutex->__data.__kind == PTHREAD_MUTEX_PP_ERRORCHECK_NP)
+      if (kind == PTHREAD_MUTEX_PP_ERRORCHECK_NP)
 	return EDEADLK;
 
-      if (mutex->__data.__kind == PTHREAD_MUTEX_PP_RECURSIVE_NP)
+      if (kind == PTHREAD_MUTEX_PP_RECURSIVE_NP)
 	locked = true;
     }
 
@@ -81,8 +82,7 @@
 
 	    if (oldval != ceilval)
 	      lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
-			      // XYZ check mutex flag
-			      LLL_SHARED);
+			      PTHREAD_MUTEX_PSHARED (mutex));
 	  }
 	while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
 						    ceilval | 2, ceilval)
@@ -113,8 +113,7 @@
   atomic_full_barrier ();
 
   lll_futex_wake (&mutex->__data.__lock, INT_MAX,
-		  // XYZ check mutex flag
-		  LLL_SHARED);
+		  PTHREAD_MUTEX_PSHARED (mutex));
 
   return 0;
 }

Modified: fsf/trunk/libc/nptl/pthread_mutex_timedlock.c
==============================================================================
--- fsf/trunk/libc/nptl/pthread_mutex_timedlock.c (original)
+++ fsf/trunk/libc/nptl/pthread_mutex_timedlock.c Sun Aug 12 00:02:41 2007
@@ -57,7 +57,7 @@
 
       /* We have to get the mutex.  */
       result = lll_timedlock (mutex->__data.__lock, abstime,
-			      /* XYZ */ LLL_SHARED);
+			      PTHREAD_MUTEX_PSHARED (mutex));
 
       if (result != 0)
 	goto out;
@@ -78,7 +78,7 @@
     simple:
       /* Normal mutex.  */
       result = lll_timedlock (mutex->__data.__lock, abstime,
-			      /* XYZ */ LLL_SHARED);
+			      PTHREAD_MUTEX_PSHARED (mutex));
       break;
 
     case PTHREAD_MUTEX_ADAPTIVE_NP:
@@ -95,7 +95,7 @@
 	      if (cnt++ >= max_cnt)
 		{
 		  result = lll_timedlock (mutex->__data.__lock, abstime,
-					  /* XYZ */ LLL_SHARED);
+					  PTHREAD_MUTEX_PSHARED (mutex));
 		  break;
 		}
 
@@ -152,16 +152,15 @@
 	  /* Check whether we already hold the mutex.  */
 	  if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
 	    {
-	      if (mutex->__data.__kind
-		  == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
+	      int kind = PTHREAD_MUTEX_TYPE (mutex);
+	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
 		{
 		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 				 NULL);
 		  return EDEADLK;
 		}
 
-	      if (mutex->__data.__kind
-		  == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
+	      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
 		{
 		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 				 NULL);
@@ -178,14 +177,15 @@
 	    }
 
 	  result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
-					 /* XYZ */ LLL_SHARED);
+					 PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
 
 	  if (__builtin_expect (mutex->__data.__owner
 				== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
 	    {
 	      /* This mutex is now not recoverable.  */
 	      mutex->__data.__count = 0;
-	      lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
+	      lll_unlock (mutex->__data.__lock,
+			  PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
 	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 	      return ENOTRECOVERABLE;
 	    }
@@ -446,8 +446,7 @@
 
 		    lll_futex_timed_wait (&mutex->__data.__lock,
 					  ceilval | 2, &rt,
-					  // XYZ check mutex flag
-					  LLL_SHARED);
+					  PTHREAD_MUTEX_PSHARED (mutex));
 		  }
 	      }
 	    while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,

Modified: fsf/trunk/libc/nptl/pthread_mutex_trylock.c
==============================================================================
--- fsf/trunk/libc/nptl/pthread_mutex_trylock.c (original)
+++ fsf/trunk/libc/nptl/pthread_mutex_trylock.c Sun Aug 12 00:02:41 2007
@@ -115,16 +115,15 @@
 	  /* Check whether we already hold the mutex.  */
 	  if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
 	    {
-	      if (mutex->__data.__kind
-		  == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
+	      int kind = PTHREAD_MUTEX_TYPE (mutex);
+	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
 		{
 		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 				 NULL);
 		  return EDEADLK;
 		}
 
-	      if (mutex->__data.__kind
-		  == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
+	      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
 		{
 		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 				 NULL);
@@ -154,7 +153,8 @@
 	      /* This mutex is now not recoverable.  */
 	      mutex->__data.__count = 0;
 	      if (oldval == id)
-		lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
+		lll_unlock (mutex->__data.__lock,
+			    PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
 	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 	      return ENOTRECOVERABLE;
 	    }

Modified: fsf/trunk/libc/nptl/pthread_mutex_unlock.c
==============================================================================
--- fsf/trunk/libc/nptl/pthread_mutex_unlock.c (original)
+++ fsf/trunk/libc/nptl/pthread_mutex_unlock.c Sun Aug 12 00:02:41 2007
@@ -61,7 +61,7 @@
 	--mutex->__data.__nusers;
 
       /* Unlock.  */
-      lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
+      lll_unlock (mutex->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex));
       break;
 
     case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
@@ -115,7 +115,8 @@
 	--mutex->__data.__nusers;
 
       /* Unlock.  */
-      lll_robust_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
+      lll_robust_unlock (mutex->__data.__lock,
+			 PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
 
       THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
       break;
@@ -242,8 +243,7 @@
 
       if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
 	lll_futex_wake (&mutex->__data.__lock, 1,
-			// XYZ check mutex flag
-			LLL_SHARED);
+			PTHREAD_MUTEX_PSHARED (mutex));
 
       int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
       return __pthread_tpp_change_priority (oldprio, -1);

Modified: fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h
==============================================================================
--- fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h (original)
+++ fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h Sun Aug 12 00:02:41 2007
@@ -103,24 +103,24 @@
   while (0)
 
 /* Returns non-zero if error happened, zero if success.  */
-#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
-  ({									      \
-    INTERNAL_SYSCALL_DECL (__err);					      \
-    long int __ret;							      \
-    __ret = INTERNAL_SYSCALL (futex, __err, 6,				      \
-			      (futexp), FUTEX_CMP_REQUEUE, (nr_wake),	      \
-			      (nr_move), (mutex), (val));		      \
+#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
+  ({									      \
+    INTERNAL_SYSCALL_DECL (__err);					      \
+    long int __ret;							      \
+    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp),		      \
+			      __lll_private_flag (FUTEX_CMP_REQUEUE, private),\
+			      (nr_wake), (nr_move), (mutex), (val));	      \
     INTERNAL_SYSCALL_ERROR_P (__ret, __err);				      \
   })
 
 /* Returns non-zero if error happened, zero if success.  */
-#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2) \
-  ({									      \
-    INTERNAL_SYSCALL_DECL (__err);					      \
-    long int __ret;							      \
-    __ret = INTERNAL_SYSCALL (futex, __err, 6,				      \
-			      (futexp), FUTEX_WAKE_OP, (nr_wake),	      \
-			      (nr_wake2), (futexp2),			      \
+#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
+  ({									      \
+    INTERNAL_SYSCALL_DECL (__err);					      \
+    long int __ret;							      \
+    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp),		      \
+			      __lll_private_flag (FUTEX_WAKE_OP, private),    \
+			      (nr_wake), (nr_wake2), (futexp2),		      \
 			      FUTEX_OP_CLEAR_WAKE_IF_GT_ONE);		      \
     INTERNAL_SYSCALL_ERROR_P (__ret, __err);				      \
   })
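
As the ChangeLog says, lll_futex_requeue and lll_futex_wake_unlock gain a
private argument on every architecture, applied through __lll_private_flag
just like the other futex wrappers.  For orientation, a rough user-space
equivalent of the requeue operation via the raw syscall (helper name
illustrative; like the glibc macros it returns non-zero on error):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <errno.h>

/* Wake NR_WAKE waiters on *FUTEXP and requeue up to NR_REQUEUE more onto
   *TARGET, but only if *FUTEXP still equals EXPECTED.  */
static int
futex_cmp_requeue (int *futexp, int nr_wake, int nr_requeue,
                   int *target, int expected, int is_private)
{
  int op = FUTEX_CMP_REQUEUE;
  if (is_private)
    op |= FUTEX_PRIVATE_FLAG;
  /* The requeue limit travels in the timeout slot of the syscall.  */
  long r = syscall (SYS_futex, futexp, op, nr_wake,
                    (unsigned long) nr_requeue, target, expected);
  return r < 0 ? errno : 0;
}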

Modified: fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h
==============================================================================
--- fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h (original)
+++ fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h Sun Aug 12 00:02:41 2007
@@ -103,18 +103,20 @@
 while (0)
 
 /* Returns non-zero if error happened, zero if success.  */
-#define lll_futex_requeue(ftx, nr_wake, nr_move, mutex, val)		     \
+#define lll_futex_requeue(ftx, nr_wake, nr_move, mutex, val, private)	     \
 ({									     \
-   DO_INLINE_SYSCALL(futex, 6, (long) (ftx), FUTEX_CMP_REQUEUE,		     \
+   DO_INLINE_SYSCALL(futex, 6, (long) (ftx),				     \
+		     __lll_private_flag (FUTEX_CMP_REQUEUE, private),	     \
 		     (int) (nr_wake), (int) (nr_move), (long) (mutex),	     \
 		     (int) val);					     \
    _r10 == -1;								     \
 })
 
 /* Returns non-zero if error happened, zero if success.  */
-#define lll_futex_wake_unlock(ftx, nr_wake, nr_wake2, ftx2)		     \
+#define lll_futex_wake_unlock(ftx, nr_wake, nr_wake2, ftx2, private)	     \
 ({									     \
-   DO_INLINE_SYSCALL(futex, 6, (long) (ftx), FUTEX_WAKE_OP,		     \
+   DO_INLINE_SYSCALL(futex, 6, (long) (ftx),				     \
+		     __lll_private_flag (FUTEX_WAKE_OP, private),	     \
 		     (int) (nr_wake), (int) (nr_wake2), (long) (ftx2),	     \
 		     FUTEX_OP_CLEAR_WAKE_IF_GT_ONE);			     \
    _r10 == -1;								     \

Modified: fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
==============================================================================
--- fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h (original)
+++ fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h Sun Aug 12 00:02:41 2007
@@ -107,14 +107,14 @@
   while (0)
 
 /* Returns non-zero if error happened, zero if success.  */
-#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
+#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
   ({									      \
     INTERNAL_SYSCALL_DECL (__err);					      \
     long int __ret;							      \
 									      \
-    __ret = INTERNAL_SYSCALL (futex, __err, 6,				      \
-			      (futexp), FUTEX_CMP_REQUEUE, (nr_wake),	      \
-			      (nr_move), (mutex), (val));		      \
+    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp),		      \
+			      __lll_private_flag (FUTEX_CMP_REQUEUE, private),\
+			      (nr_wake), (nr_move), (mutex), (val));	      \
     INTERNAL_SYSCALL_ERROR_P (__ret, __err);				      \
   })
 

Modified: fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/pthread-pi-defines.sym
==============================================================================
--- fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/pthread-pi-defines.sym (original)
+++ fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/pthread-pi-defines.sym Sun Aug 12 00:02:41 2007
@@ -4,3 +4,4 @@
 
 MUTEX_KIND	offsetof (pthread_mutex_t, __data.__kind)
 PI_BIT		PTHREAD_MUTEX_PRIO_INHERIT_NP
+PS_BIT		PTHREAD_MUTEX_PSHARED_BIT

Modified: fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c
==============================================================================
--- fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c (original)
+++ fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c Sun Aug 12 00:02:41 2007
@@ -1,8 +1,12 @@
 #include <pthreadP.h>
 
-#define LLL_MUTEX_LOCK(mutex) lll_cond_lock (mutex, /* XYZ */ LLL_SHARED)
-#define LLL_MUTEX_TRYLOCK(mutex) lll_cond_trylock (mutex)
-#define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_cond_lock (mutex, id, /* XYZ */ LLL_SHARED)
+#define LLL_MUTEX_LOCK(mutex) \
+  lll_cond_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
+#define LLL_MUTEX_TRYLOCK(mutex) \
+  lll_cond_trylock ((mutex)->__data.__lock)
+#define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
+  lll_robust_cond_lock ((mutex)->__data.__lock, id, \
+			PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
 #define __pthread_mutex_lock __pthread_mutex_cond_lock
 #define NO_INCR
 

Modified: fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h
==============================================================================
--- fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h (original)
+++ fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h Sun Aug 12 00:02:41 2007
@@ -93,7 +93,7 @@
   ({									      \
     register unsigned long int __r2 asm ("2") = (unsigned long int) (futex);  \
     register unsigned long int __r3 asm ("3")				      \
-      __lll_private_flag (FUTEX_WAKE, private);				      \
+      = __lll_private_flag (FUTEX_WAKE, private);			      \
     register unsigned long int __r4 asm ("4") = (unsigned long int) (nr);     \
     register unsigned long int __result asm ("2");			      \
 									      \
@@ -117,10 +117,11 @@
 
 
 /* Returns non-zero if error happened, zero if success.  */
-#define lll_futex_requeue(futex, nr_wake, nr_move, mutex, val) \
+#define lll_futex_requeue(futex, nr_wake, nr_move, mutex, val, private) \
   ({									      \
     register unsigned long int __r2 asm ("2") = (unsigned long int) (futex);  \
-    register unsigned long int __r3 asm ("3") = FUTEX_CMP_REQUEUE;	      \
+    register unsigned long int __r3 asm ("3")				      \
+      = __lll_private_flag (FUTEX_CMP_REQUEUE, private);		      \
     register unsigned long int __r4 asm ("4") = (long int) (nr_wake);	      \
     register unsigned long int __r5 asm ("5") = (long int) (nr_move);	      \
     register unsigned long int __r6 asm ("6") = (unsigned long int) (mutex);  \
@@ -137,10 +138,11 @@
 
 
 /* Returns non-zero if error happened, zero if success.  */
-#define lll_futex_wake_unlock(futex, nr_wake, nr_wake2, futex2) \
+#define lll_futex_wake_unlock(futex, nr_wake, nr_wake2, futex2, private) \
   ({									      \
     register unsigned long int __r2 asm ("2") = (unsigned long int) (futex);  \
-    register unsigned long int __r3 asm ("3") = FUTEX_WAKE_OP;	      \
+    register unsigned long int __r3 asm ("3")				      \
+      = __lll_private_flag (FUTEX_WAKE_OP, private);			      \
     register unsigned long int __r4 asm ("4") = (long int) (nr_wake);	      \
     register unsigned long int __r5 asm ("5") = (long int) (nr_wake2);	      \
     register unsigned long int __r6 asm ("6") = (unsigned long int) (futex2); \

Modified: fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
==============================================================================
--- fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h (original)
+++ fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h Sun Aug 12 00:02:41 2007
@@ -96,14 +96,14 @@
   })
 
 /* Returns non-zero if error happened, zero if success.  */
-#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
-  ({									      \
-    INTERNAL_SYSCALL_DECL (__err);					      \
-    long int __ret;							      \
-									      \
-    __ret = INTERNAL_SYSCALL (futex, __err, 6,				      \
-			      (futexp), FUTEX_CMP_REQUEUE, (nr_wake),	      \
-			      (nr_move), (mutex), (val));		      \
+#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
+  ({									      \
+    INTERNAL_SYSCALL_DECL (__err);					      \
+    long int __ret;							      \
+									      \
+    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp),		      \
+			      __lll_private_flag (FUTEX_CMP_REQUEUE, private),\
+			      (nr_wake), (nr_move), (mutex), (val));	      \
     INTERNAL_SYSCALL_ERROR_P (__ret, __err);				      \
   })
 
@@ -121,14 +121,14 @@
 /* Avoid FUTEX_WAKE_OP if supporting pre-v9 CPUs.  */
 # define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2) 1
 #else
-# define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2) \
-  ({									      \
-    INTERNAL_SYSCALL_DECL (__err);					      \
-    long int __ret;							      \
-									      \
-    __ret = INTERNAL_SYSCALL (futex, __err, 6,				      \
-			      (futexp), FUTEX_WAKE_OP, (nr_wake),	      \
-			      (nr_wake2), (futexp2),			      \
+# define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
+  ({									      \
+    INTERNAL_SYSCALL_DECL (__err);					      \
+    long int __ret;							      \
+									      \
+    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp),		      \
+			      __lll_private_flag (FUTEX_WAKE_OP, private),    \
+			      (nr_wake), (nr_wake2), (futexp2),		      \
 			      FUTEX_OP_CLEAR_WAKE_IF_GT_ONE);		      \
     INTERNAL_SYSCALL_ERROR_P (__ret, __err);				      \
   })

Modified: fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
==============================================================================
--- fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h (original)
+++ fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h Sun Aug 12 00:02:41 2007
@@ -533,7 +533,7 @@
   while (0)
 
 /* Returns non-zero if error happened, zero if success.  */
-#define lll_futex_requeue(ftx, nr_wake, nr_move, mutex, val) \
+#define lll_futex_requeue(ftx, nr_wake, nr_move, mutex, val, private) \
   ({ int __res;								      \
      register int __nr_move __asm ("r10") = nr_move;			      \
      register void *__mutex __asm ("r8") = mutex;			      \
@@ -541,7 +541,8 @@
      __asm __volatile ("syscall"					      \
 		       : "=a" (__res)					      \
 		       : "0" (__NR_futex), "D" ((void *) ftx),		      \
-			 "S" (FUTEX_CMP_REQUEUE), "d" (nr_wake),	      \
+			 "S" (__lll_private_flag (FUTEX_CMP_REQUEUE,	      \
+						  private)), "d" (nr_wake),   \
 			 "r" (__nr_move), "r" (__mutex), "r" (__val)	      \
 		       : "cx", "r11", "cc", "memory");			      \
      __res < 0; })

Modified: fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
==============================================================================
--- fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S (original)
+++ fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S Sun Aug 12 00:02:41 2007
@@ -71,11 +71,18 @@
 	je	9f
 
 	/* XXX: The kernel so far doesn't support requeue to PI futex.  */
-	testl	$PI_BIT, MUTEX_KIND(%r8)
+	/* XXX: The kernel only supports FUTEX_CMP_REQUEUE to the same
+	   type of futex (private resp. shared).  */
+	testl	$(PI_BIT | PS_BIT), MUTEX_KIND(%r8)
 	jne	9f
 
 	/* Wake up all threads.  */
-	movl	$FUTEX_CMP_REQUEUE, %esi
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$(FUTEX_CMP_REQUEUE|FUTEX_PRIVATE_FLAG), %esi
+#else
+	movl	%fs:PRIVATE_FUTEX, %esi
+	orl	$FUTEX_CMP_REQUEUE, %esi
+#endif
 	movl	$SYS_futex, %eax
 	movl	$1, %edx
 	movl	$0x7fffffff, %r10d
@@ -104,8 +111,10 @@
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	/* XYZ */
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
 	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
 	callq	__lll_lock_wait
 #if cond_lock != 0
 	subq	$cond_lock, %rdi
@@ -114,22 +123,36 @@
 
 	/* Unlock in loop requires wakeup.  */
 5:	addq	$cond_lock-cond_futex, %rdi
-	/* XYZ */
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
 	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
 	callq	__lll_unlock_wake
 	jmp	6b
 
 	/* Unlock in loop requires wakeup.  */
 7:	addq	$cond_lock-cond_futex, %rdi
-	/* XYZ */
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
 	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
 	callq	__lll_unlock_wake
 	subq	$cond_lock-cond_futex, %rdi
 	jmp	8b
 
 9:	/* The futex requeue functionality is not available.  */
+	cmpq	$-1, dep_mutex-cond_futex(%rdi)
 	movl	$0x7fffffff, %edx
-	movl	$FUTEX_WAKE, %esi
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAKE, %eax
+	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+#else
+	movl	$0, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+	orl	$FUTEX_WAKE, %esi
+#endif
 	movl	$SYS_futex, %eax
 	syscall
 	jmp	10b
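
The assembly changes in this and the following x86-64 files repeat one small
pattern: compare dep_mutex against -1 (the process-shared sentinel) and pick
the futex selector or op flags with a conditional move instead of a branch.
In C the lock-wait case looks roughly like this (illustrative names; LLL_*
values as in pthreadP.h's fast path):

#define LLL_PRIVATE 0
#define LLL_SHARED  128

static inline int
pick_lll_selector (void *dep_mutex)
{
  int selector = LLL_SHARED;		/* movl   $LLL_SHARED, %esi   */
  int private_sel = LLL_PRIVATE;	/* movl   $LLL_PRIVATE, %eax  */
  if (dep_mutex != (void *) -1l)	/* cmpq   $-1, dep_mutex(...) */
    selector = private_sel;		/* cmovne %eax, %esi          */
  return selector;
}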

Modified: fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
==============================================================================
--- fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S (original)
+++ fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S Sun Aug 12 00:02:41 2007
@@ -55,10 +55,20 @@
 	addl	$1, (%rdi)
 
 	/* Wake up one thread.  */
-	movl	$FUTEX_WAKE_OP, %esi
+	cmpq	$-1, dep_mutex(%r8)
+	movl	$1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAKE_OP, %eax
+	movl	$(FUTEX_WAKE_OP|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+#else
+	movl	$0, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+	orl	$FUTEX_WAKE_OP, %esi
+#endif
+	movl	$1, %r10d
 	movl	$SYS_futex, %eax
-	movl	$1, %edx
-	movl	$1, %r10d
 #if cond_lock != 0
 	addq	$cond_lock, %r8
 #endif
@@ -75,7 +85,9 @@
 	xorl	%eax, %eax
 	retq
 
-7:	movl	$FUTEX_WAKE, %esi
+7:	/* %esi should be either FUTEX_WAKE_OP or
+	   FUTEX_WAKE_OP|FUTEX_PRIVATE_FLAG from the previous syscall.  */
+	xorl	$(FUTEX_WAKE | FUTEX_WAKE_OP), %esi
 	movl	$SYS_futex, %eax
 	/* %rdx should be 1 already from $FUTEX_WAKE_OP syscall.
 	movl	$1, %edx  */
@@ -98,8 +110,10 @@
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	/* XYZ */
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
 	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
 	callq	__lll_lock_wait
 #if cond_lock != 0
 	subq	$cond_lock, %rdi
@@ -109,8 +123,13 @@
 	/* Unlock in loop requires wakeup.  */
 5:
 	movq	%r8, %rdi
-	/* XYZ */
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
 	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
 	callq	__lll_unlock_wake
 	jmp	6b
 	.size	__pthread_cond_signal, .-__pthread_cond_signal

Modified: fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
==============================================================================
--- fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S (original)
+++ fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S Sun Aug 12 00:02:41 2007
@@ -186,12 +186,20 @@
 	movl	%eax, (%rsp)
 
 	leaq	24(%rsp), %r10
-#if FUTEX_WAIT == 0
-	xorl	%esi, %esi
-#else
-	movl	$FUTEX_WAIT, %esi
-#endif
+	cmpq	$-1, dep_mutex(%rdi)
 	movq	%r12, %rdx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAIT, %eax
+	movl	$(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+#else
+	movl	$0, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+# if FUTEX_WAIT != 0
+	orl	$FUTEX_WAIT, %esi
+# endif
+#endif
 	addq	$cond_futex, %rdi
 	movl	$SYS_futex, %eax
 	syscall
@@ -251,9 +259,19 @@
 	jne	25f
 
 	addq	$cond_nwaiters, %rdi
+	cmpq	$-1, dep_mutex-cond_nwaiters(%rdi)
+	movl	$1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAKE, %eax
+	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+#else
+	movl	$0, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+	orl	$FUTEX_WAKE, %esi
+#endif
 	movl	$SYS_futex, %eax
-	movl	$FUTEX_WAKE, %esi
-	movl	$1, %edx
 	syscall
 	subq	$cond_nwaiters, %rdi
 
@@ -292,8 +310,10 @@
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	/* XYZ */
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
 	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
 	callq	__lll_lock_wait
 	jmp	2b
 
@@ -302,8 +322,10 @@
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	/* XYZ */
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
 	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
 	callq	__lll_unlock_wake
 	jmp	4b
 
@@ -312,8 +334,10 @@
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	/* XYZ */
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
 	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
 	callq	__lll_lock_wait
 #if cond_lock != 0
 	subq	$cond_lock, %rdi
@@ -325,8 +349,10 @@
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	/* XYZ */
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
 	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
 	callq	__lll_unlock_wake
 	jmp	11b
 
@@ -344,8 +370,10 @@
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	/* XYZ */
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
 	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
 	callq	__lll_unlock_wake
 
 17:	movq	(%rsp), %rax

Modified: fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
==============================================================================
--- fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S (original)
+++ fsf/trunk/libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S Sun Aug 12 00:02:41 2007
@@ -49,8 +49,10 @@
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	/* XYZ */
-	movl	$LLL_SHARED, %esi
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
 	callq	__lll_lock_wait
 #if cond_lock != 0
 	subq	$cond_lock, %rdi
@@ -81,9 +83,19 @@
 	jne	4f
 
 	addq	$cond_nwaiters, %rdi
+	cmpq	$-1, dep_mutex-cond_nwaiters(%rdi)
+	movl	$1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAKE, %eax
+	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+#else
+	movl	$0, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+	orl	$FUTEX_WAKE, %esi
+#endif
 	movl	$SYS_futex, %eax
-	movl	$FUTEX_WAKE, %esi
-	movl	$1, %edx
 	syscall
 	subq	$cond_nwaiters, %rdi
 	movl	$1, %r12d
@@ -98,16 +110,28 @@
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	/* XYZ */
-	movl	$LLL_SHARED, %esi
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
 	callq	__lll_unlock_wake
 
 	/* Wake up all waiters to make sure no signal gets lost.  */
 2:	testq	%r12, %r12
 	jnz	5f
 	addq	$cond_futex, %rdi
-	movl	$FUTEX_WAKE, %esi
+	cmpq	$-1, dep_mutex-cond_futex(%rdi)
 	movl	$0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAKE, %eax
+	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+#else
+	movl	$0, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+	orl	$FUTEX_WAKE, %esi
+#endif
 	movl	$SYS_futex, %eax
 	syscall
 
@@ -216,12 +240,20 @@
 	xorq	%r10, %r10
 	movq	%r12, %rdx
 	addq	$cond_futex-cond_lock, %rdi
+	cmpq	$-1, dep_mutex-cond_futex(%rdi)
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAIT, %eax
+	movl	$(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+#else
+	movl	$FUTEX_WAIT, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+# if FUTEX_WAIT != 0
+	orl	$FUTEX_WAIT, %esi
+# endif
+#endif
 	movl	$SYS_futex, %eax
-#if FUTEX_WAIT == 0
-	xorl	%esi, %esi
-#else
-	movl	$FUTEX_WAIT, %esi
-#endif
 	syscall
 
 	movl	(%rsp), %edi
@@ -267,9 +299,19 @@
 	jne	17f
 
 	addq	$cond_nwaiters, %rdi
+	cmpq	$-1, dep_mutex-cond_nwaiters(%rdi)
+	movl	$1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAKE, %eax
+	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+#else
+	movl	$0, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+	orl	$FUTEX_WAKE, %esi
+#endif
 	movl	$SYS_futex, %eax
-	movl	$FUTEX_WAKE, %esi
-	movl	$1, %edx
 	syscall
 	subq	$cond_nwaiters, %rdi
 
@@ -302,8 +344,10 @@
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	/* XYZ */
-	movl	$LLL_SHARED, %esi
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
 	callq	__lll_lock_wait
 	jmp	2b
 
@@ -312,8 +356,10 @@
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	/* XYZ */
-	movl	$LLL_SHARED, %esi
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
 	callq	__lll_unlock_wake
 	jmp	4b
 
@@ -322,8 +368,10 @@
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	/* XYZ */
-	movl	$LLL_SHARED, %esi
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
 	callq	__lll_lock_wait
 #if cond_lock != 0
 	subq	$cond_lock, %rdi
@@ -335,8 +383,10 @@
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	/* XYZ */
-	movl	$LLL_SHARED, %esi
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
 	callq	__lll_unlock_wake
 	jmp	11b
 
@@ -354,8 +404,10 @@
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	/* XYZ */
-	movl	$LLL_SHARED, %esi
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
 	callq	__lll_unlock_wake
 
 13:	movq	%r10, %rax