From 515a8908cedcf7432270f410e4a749e4ce07a072 Mon Sep 17 00:00:00 2001
From: Ulrich Drepper <drepper@redhat.com>
Date: Sun, 19 Jul 2009 14:54:56 -0700
Subject: [PATCH] Make x86-64 pthread_cond_timedwait more robust.

It just happens that __pthread_enable_asynccancel doesn't modify the %rdi
register, but this isn't guaranteed: %rdi is call-clobbered in the x86-64
ABI.  Hence we reload the register from the stack after the calls.
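
For illustration, a minimal sketch of the resulting pattern (mirroring the
first hunk below; 8(%rsp) is the slot where the condvar pointer is spilled
earlier in __pthread_cond_timedwait):

	callq	__pthread_enable_asynccancel	# %rdi is call-clobbered per the
						# x86-64 SysV ABI, so its value may
						# not survive this call
	movl	%eax, (%rsp)			# save the old cancellation state
	movq	8(%rsp), %rdi			# reload the condvar pointer before
	cmpq	$-1, dep_mutex(%rdi)		# dereferencing it again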
---
 nptl/ChangeLog                                               | 5 +++++
 nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S | 2 ++
 2 files changed, 7 insertions(+)

diff --git a/nptl/ChangeLog b/nptl/ChangeLog
index e9cac73459..785100d852 100644
--- a/nptl/ChangeLog
+++ b/nptl/ChangeLog
@@ -1,3 +1,8 @@
+2009-07-19  Ulrich Drepper  <drepper@redhat.com>
+
+	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
+	(__pthread_cond_timedwait): Make more robust.
+
 2009-07-18  Ulrich Drepper  <drepper@redhat.com>
 
 	* sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
index 45a9a4213b..1b19fdb8dc 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
@@ -153,6 +153,7 @@ __pthread_cond_timedwait:
 .LcleanupSTART1:
 34:	callq	__pthread_enable_asynccancel
 	movl	%eax, (%rsp)
+	movq	8(%rsp), %rdi
 
 	movq	%r13, %r10
 	cmpq	$-1, dep_mutex(%rdi)
@@ -456,6 +457,7 @@ __pthread_cond_timedwait:
 .LcleanupSTART2:
 4:	callq	__pthread_enable_asynccancel
 	movl	%eax, (%rsp)
+	movq	8(%rsp), %rdi
 
 	leaq	32(%rsp), %r10
 	cmpq	$-1, dep_mutex(%rdi)