Commit

* sysdeps/unix/sysv/linux/kernel-features.h: Add
	__ASSUME_SET_ROBUST_LIST.

Ulrich Drepper committed Mar 28, 2006
1 parent 5b20043 commit 0f6699e
Showing 16 changed files with 520 additions and 123 deletions.
5 changes: 5 additions & 0 deletions ChangeLog
@@ -1,3 +1,8 @@
2006-03-27 Ulrich Drepper <drepper@redhat.com>

* sysdeps/unix/sysv/linux/kernel-features.h: Add
__ASSUME_SET_ROBUST_LIST.

2006-03-27 Jakub Jelinek <jakub@redhat.com>

* wcsmbs/wchar.h (btowc, wctob): Don't optimize in C++.
2 changes: 1 addition & 1 deletion nptl/Makefile
@@ -206,7 +206,7 @@ tests = tst-typesizes \
tst-cond14 tst-cond15 tst-cond16 tst-cond17 tst-cond18 tst-cond19 \
tst-cond20 tst-cond21 \
tst-robust1 tst-robust2 tst-robust3 tst-robust4 tst-robust5 \
tst-robust6 tst-robust7 \
tst-robust6 tst-robust7 tst-robust8 \
tst-rwlock1 tst-rwlock2 tst-rwlock3 tst-rwlock4 tst-rwlock5 \
tst-rwlock6 tst-rwlock7 tst-rwlock8 tst-rwlock9 tst-rwlock10 \
tst-rwlock11 tst-rwlock12 tst-rwlock13 tst-rwlock14 \
24 changes: 12 additions & 12 deletions nptl/allocatestack.c
@@ -365,12 +365,6 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
/* The process ID is also the same as that of the caller. */
pd->pid = THREAD_GETMEM (THREAD_SELF, pid);

/* List of robust mutexes. */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
pd->robust_list.__prev = &pd->robust_list;
#endif
pd->robust_list.__next = &pd->robust_list;

/* Allocate the DTV for this thread. */
if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
{
@@ -505,12 +499,6 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
/* The process ID is also the same as that of the caller. */
pd->pid = THREAD_GETMEM (THREAD_SELF, pid);

/* List of robust mutexes. */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
pd->robust_list.__prev = &pd->robust_list;
#endif
pd->robust_list.__next = &pd->robust_list;

/* Allocate the DTV for this thread. */
if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
{
@@ -634,6 +622,18 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
stillborn thread could be canceled while the lock is taken. */
pd->lock = LLL_LOCK_INITIALIZER;

/* The robust mutex lists also need to be initialized
unconditionally because the cleanup for the previous stack owner
might have happened in the kernel. */
pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
- offsetof (pthread_mutex_t,
__data.__list.__next));
pd->robust_head.list_op_pending = NULL;
#ifdef __PTHREAD_MUTEX_HAVE_PREV
pd->robust_prev = &pd->robust_head;
#endif
pd->robust_head.list = &pd->robust_head;

/* We place the thread descriptor at the end of the stack. */
*pdp = pd;

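The futex_offset stored into robust_head above is the heart of the kernel ABI: it is the signed distance from a robust-list entry (the __list.__next field inside a mutex) to that mutex's futex word, so the kernel can recover the lock from a list link alone. A minimal sketch of the arithmetic, with illustrative names rather than glibc's:

#include <stddef.h>
#include <stdio.h>

struct mutex_data                 /* stand-in for pthread_mutex_t's __data */
{
  int lock;                       /* the futex word (__lock) */
  struct { void *next; } list;    /* the robust-list entry (__list) */
};

int
main (void)
{
  /* Same computation as in allocate_stack above.  */
  long futex_offset = (long) offsetof (struct mutex_data, lock)
		      - (long) offsetof (struct mutex_data, list.next);

  struct mutex_data m;
  /* The kernel only ever sees &m.list.next; adding futex_offset
     recovers &m.lock.  */
  int *lock = (int *) ((char *) &m.list.next + futex_offset);
  printf ("%d\n", lock == &m.lock);   /* prints 1 */
  return 0;
}
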
45 changes: 36 additions & 9 deletions nptl/descr.h
@@ -102,6 +102,15 @@ struct xid_command
};


/* Data structure used by the kernel to find robust futexes. */
struct robust_list_head
{
void *list;
long int futex_offset;
void *list_op_pending;
};


/* Thread descriptor data structure. */
struct pthread
{
@@ -136,25 +145,43 @@ struct pthread

/* List of robust mutexes the thread is holding. */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
__pthread_list_t robust_list;
void *robust_prev;
struct robust_list_head robust_head;

/* The list above is strange. It is basically a double linked list
but the pointer to the next/previous element of the list points
in the middle of the object, the __next element. Whenever
casting to __pthread_list_t we need to adjust the pointer
first. */
# define QUEUE_PTR_ADJUST (offsetof (__pthread_list_t, __next))

# define ENQUEUE_MUTEX(mutex) \
do { \
__pthread_list_t *next = THREAD_GETMEM (THREAD_SELF, robust_list.__next); \
next->__prev = &mutex->__data.__list; \
mutex->__data.__list.__next = next; \
mutex->__data.__list.__prev = &THREAD_SELF->robust_list; \
THREAD_SETMEM (THREAD_SELF, robust_list.__next, &mutex->__data.__list); \
__pthread_list_t *next = (THREAD_GETMEM (THREAD_SELF, robust_head.list) \
- QUEUE_PTR_ADJUST); \
next->__prev = (void *) &mutex->__data.__list.__next; \
mutex->__data.__list.__next = (void *) &next->__next; \
mutex->__data.__list.__prev = (void *) &THREAD_SELF->robust_head; \
THREAD_SETMEM (THREAD_SELF, robust_head.list, \
&mutex->__data.__list.__next); \
} while (0)
# define DEQUEUE_MUTEX(mutex) \
do { \
mutex->__data.__list.__next->__prev = mutex->__data.__list.__prev; \
mutex->__data.__list.__prev->__next = mutex->__data.__list.__next; \
__pthread_list_t *next = (__pthread_list_t *) \
((char *) mutex->__data.__list.__next - QUEUE_PTR_ADJUST); \
next->__prev = mutex->__data.__list.__prev; \
__pthread_list_t *prev = (__pthread_list_t *) \
((char *) mutex->__data.__list.__prev - QUEUE_PTR_ADJUST); \
prev->__next = mutex->__data.__list.__next; \
mutex->__data.__list.__prev = NULL; \
mutex->__data.__list.__next = NULL; \
} while (0)
#else
__pthread_slist_t robust_list;
union
{
__pthread_slist_t robust_list;
struct robust_list_head robust_head;
};

# define ENQUEUE_MUTEX(mutex) \
do { \
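
The comment in the hunk above is the crux of the new layout: the kernel-visible links no longer point at a list element's start but at its __next field, so user code must subtract QUEUE_PTR_ADJUST before treating a link as a __pthread_list_t again. The payoff is that the kernel, which only ever follows next pointers, can walk the singly and doubly linked variants uniformly. A sketch of the same idiom, with our own names and not glibc's code:

#include <stddef.h>

/* Layout mirrors __pthread_list_t: prev first, then next.  */
struct node
{
  struct node *prev;
  struct node *next;
};

/* Links address the next field, not the node itself.  */
#define PTR_ADJUST offsetof (struct node, next)

static void *
link_from_node (struct node *n)     /* what goes on the kernel list */
{
  return &n->next;
}

static struct node *
node_from_link (void *link)         /* link == &some_node->next */
{
  return (struct node *) ((char *) link - PTR_ADJUST);
}
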
28 changes: 24 additions & 4 deletions nptl/init.c
@@ -60,6 +60,15 @@
size_t __static_tls_size;
size_t __static_tls_align_m1;

#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if we do not have the system call and thus cannot use it. */
int __set_robust_list_avail;
# define set_robust_list_not_avail() \
__set_robust_list_avail = -1
#else
# define set_robust_list_not_avail() do { } while (0)
#endif

/* Version of the library, used in libthread_db to detect mismatches. */
static const char nptl_version[] __attribute_used__ = VERSION;

@@ -247,10 +256,6 @@ __pthread_initialize_minimal_internal (void)
struct pthread *pd = THREAD_SELF;
INTERNAL_SYSCALL_DECL (err);
pd->pid = pd->tid = INTERNAL_SYSCALL (set_tid_address, err, 1, &pd->tid);
#ifdef __PTHREAD_MUTEX_HAVE_PREV
pd->robust_list.__prev = &pd->robust_list;
#endif
pd->robust_list.__next = &pd->robust_list;
THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
THREAD_SETMEM (pd, user_stack, true);
if (LLL_LOCK_INITIALIZER != 0)
@@ -259,6 +264,21 @@
THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif

/* Initialize the robust mutex data. */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
pd->robust_prev = &pd->robust_head;
#endif
pd->robust_head.list = &pd->robust_head;
#ifdef __NR_set_robust_list
pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
- offsetof (pthread_mutex_t,
__data.__list.__next));
int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
sizeof (struct robust_list_head));
if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
set_robust_list_not_avail ();

/* Set initial thread's stack block from 0 up to __libc_stack_end.
It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
purposes this is good enough. */
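
init.c thus performs the availability probe exactly once, at library startup: it issues set_robust_list for the initial thread and, if the kernel rejects it, latches __set_robust_list_avail to -1 so pthread_create and pthread_mutex_init can fall back to user-space cleanup. A sketch of the same probe-once pattern using a plain syscall() wrapper in place of glibc's INTERNAL_SYSCALL; illustrative only:

#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>

static int set_robust_list_avail;        /* negative => cannot use syscall */

static void
probe_set_robust_list (void *head, size_t len)
{
#ifdef SYS_set_robust_list
  if (syscall (SYS_set_robust_list, head, len) == 0)
    return;                              /* kernel support confirmed */
#endif
  set_robust_list_avail = -1;            /* e.g. ENOSYS on older kernels */
}
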
31 changes: 24 additions & 7 deletions nptl/pthreadP.h
@@ -31,6 +31,7 @@
#include <internaltypes.h>
#include <pthread-functions.h>
#include <atomic.h>
#include <kernel-features.h>


/* Atomic operations on TLS memory. */
@@ -60,13 +61,13 @@
/* Internal mutex type value. */
enum
{
PTHREAD_MUTEX_ROBUST_PRIVATE_NP = 16,
PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP
= PTHREAD_MUTEX_ROBUST_PRIVATE_NP | PTHREAD_MUTEX_RECURSIVE_NP,
PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP
= PTHREAD_MUTEX_ROBUST_PRIVATE_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP
= PTHREAD_MUTEX_ROBUST_PRIVATE_NP | PTHREAD_MUTEX_ADAPTIVE_NP,
PTHREAD_MUTEX_ROBUST_NORMAL_NP = 16,
PTHREAD_MUTEX_ROBUST_RECURSIVE_NP
= PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_RECURSIVE_NP,
PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP
= PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP
= PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_ADAPTIVE_NP,
PTHREAD_MUTEX_PRIO_INHERIT_PRIVATE_NP = 32,
PTHREAD_MUTEX_PRIO_PROTECT_PRIVATE_NP = 64
};
@@ -128,6 +129,11 @@ hidden_proto (__pthread_keys)
/* Number of threads running. */
extern unsigned int __nptl_nthreads attribute_hidden;

#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if we do not have the system call and thus cannot use it. */
extern int __set_robust_list_avail attribute_hidden;
#endif

/* The library can run in debugging mode where it performs a lot more
tests. */
extern int __pthread_debug attribute_hidden;
@@ -504,4 +510,15 @@ extern int __nptl_setxid (struct xid_command *cmdp) attribute_hidden;
# define PTHREAD_STATIC_FN_REQUIRE(name) __asm (".globl " #name);
#endif


#ifndef __NR_set_robust_list
/* XXX For the time being... Once we can rely on the kernel headers
having the definition remove these lines. */
# if defined __i386__
# define __NR_set_robust_list 311
# elif defined __x86_64__
# define __NR_set_robust_list 273
# endif
#endif

#endif /* pthreadP.h */
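
Note the rename in the enum above: the internal constants drop PRIVATE in favor of NORMAL because robust mutexes are no longer restricted to process-private use. They remain bit flags layered on the base mutex kind, so a single mask test recognizes every robust variant, which is what pthread_mutex_consistent.c and pthread_mutex_destroy.c below rely on. A small restatement (values follow the enum above; the helper name is ours):

enum
{
  MUTEX_RECURSIVE  = 1,    /* PTHREAD_MUTEX_RECURSIVE_NP */
  MUTEX_ERRORCHECK = 2,    /* PTHREAD_MUTEX_ERRORCHECK_NP */
  MUTEX_ROBUST     = 16    /* PTHREAD_MUTEX_ROBUST_NORMAL_NP */
};

/* One test covers robust-normal, robust-recursive, robust-errorcheck
   and robust-adaptive alike.  */
static inline int
mutex_is_robust (int kind)
{
  return (kind & MUTEX_ROBUST) != 0;
}
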
46 changes: 29 additions & 17 deletions nptl/pthread_create.c
@@ -229,6 +229,19 @@ start_thread (void *arg)
/* Initialize resolver state pointer. */
__resp = &pd->res;

#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
if (__set_robust_list_avail >= 0)
# endif
{
INTERNAL_SYSCALL_DECL (err);
/* This call should never fail because the initial call in init.c
succeeded. */
INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
sizeof (struct robust_list_head));
}
#endif

/* This is where the try/finally block should be created. For
compilers without that support we do use setjmp. */
struct pthread_unwind_buf unwind_buf;
@@ -310,35 +323,34 @@
the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE. */
atomic_bit_set (&pd->cancelhandling, EXITING_BIT);

#ifndef __ASSUME_SET_ROBUST_LIST
/* If this thread has any robust mutexes locked, handle them now. */
#if __WORDSIZE == 64
__pthread_list_t *robust = pd->robust_list.__next;
#else
# if __WORDSIZE == 64
void *robust = pd->robust_head.list;
# else
__pthread_slist_t *robust = pd->robust_list.__next;
#endif
if (__builtin_expect (robust != &pd->robust_list, 0))
# endif
/* We let the kernel do the notification if it is able to do so. */
if (__set_robust_list_avail < 0
&& __builtin_expect (robust != &pd->robust_head, 0))
{
do
{
struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
((char *) robust - offsetof (struct __pthread_mutex_s, __list));
robust = robust->__next;
((char *) robust - offsetof (struct __pthread_mutex_s,
__list.__next));
robust = *((void **) robust);

this->__list.__next = NULL;
#ifdef __PTHREAD_MUTEX_HAVE_PREV
# ifdef __PTHREAD_MUTEX_HAVE_PREV
this->__list.__prev = NULL;
#endif
# endif
this->__list.__next = NULL;

lll_robust_mutex_dead (this->__lock);
}
while (robust != &pd->robust_list);

/* Clean up so that the thread descriptor can be reused. */
pd->robust_list.__next = &pd->robust_list;
#ifdef __PTHREAD_MUTEX_HAVE_PREV
pd->robust_list.__prev = &pd->robust_list;
#endif
while (robust != &pd->robust_head);
}
#endif

/* If the thread is detached free the TCB. */
if (IS_DETACHED (pd))
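
The robust-list register is strictly per thread, which is why start_thread repeats the set_robust_list call for every new thread instead of inheriting the parent's registration. Outside glibc the same registration can be done with the raw syscall; a standalone sketch in which the struct layout mirrors the kernel ABI from linux/futex.h and the names are ours:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

struct robust_list { struct robust_list *next; };

struct robust_list_head
{
  struct robust_list list;
  long futex_offset;
  struct robust_list *list_op_pending;
};

static __thread struct robust_list_head head;

int
main (void)
{
  head.list.next = &head.list;   /* empty list: head points at itself */
  head.futex_offset = 0;         /* lock word located at the entry itself */
  head.list_op_pending = NULL;

  /* The kernel insists on the exact structure size.  */
  long rc = syscall (SYS_set_robust_list, &head, sizeof head);
  printf ("set_robust_list: %ld\n", rc);   /* 0, or -1 with errno ENOSYS */
  return 0;
}
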
2 changes: 1 addition & 1 deletion nptl/pthread_mutex_consistent.c
@@ -26,7 +26,7 @@ pthread_mutex_consistent_np (mutex)
pthread_mutex_t *mutex;
{
/* Test whether this is a robust mutex with a dead owner. */
if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_PRIVATE_NP) == 0
if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
|| mutex->__data.__owner != PTHREAD_MUTEX_INCONSISTENT)
return EINVAL;

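From the application side, the check above pairs with the owner-died protocol: locking a robust mutex whose owner died returns EOWNERDEAD, and the new owner must repair the protected data and call pthread_mutex_consistent_np (the pre-POSIX-2008 name used here) before unlocking, or the mutex becomes permanently unusable. A usage sketch under those assumptions:

#include <errno.h>
#include <pthread.h>

extern pthread_mutex_t m;          /* a robust mutex initialized elsewhere */

int
locked_update (void (*repair) (void), void (*update) (void))
{
  int e = pthread_mutex_lock (&m);
  if (e == EOWNERDEAD)
    {
      repair ();                         /* previous owner died mid-update */
      pthread_mutex_consistent_np (&m);  /* mark the state recovered */
    }
  else if (e != 0)
    return e;                            /* e.g. ENOTRECOVERABLE */
  update ();
  return pthread_mutex_unlock (&m);
}
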
12 changes: 3 additions & 9 deletions nptl/pthread_mutex_destroy.c
@@ -25,15 +25,9 @@ int
__pthread_mutex_destroy (mutex)
pthread_mutex_t *mutex;
{
if (mutex->__data.__nusers != 0)
{
if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_PRIVATE_NP) != 0
&& (mutex->__data.__lock & FUTEX_OWNER_DIED) != 0
&& mutex->__data.__nusers == 1)
goto dead_robust_mutex;

return EBUSY;
}
if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
&& mutex->__data.__nusers != 0)
return EBUSY;

/* Set to an invalid value. */
dead_robust_mutex:
17 changes: 11 additions & 6 deletions nptl/pthread_mutex_init.c
@@ -22,7 +22,6 @@
#include <string.h>
#include "pthreadP.h"


static const struct pthread_mutexattr default_attr =
{
/* Default is a normal mutex, not shared between processes. */
@@ -42,10 +41,6 @@ __pthread_mutex_init (mutex, mutexattr)
imutexattr = (const struct pthread_mutexattr *) mutexattr ?: &default_attr;

/* Sanity checks. */
// XXX For now we cannot implement robust mutexes if they are shared.
if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0
&& (imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_PSHARED) != 0)
return ENOTSUP;
// XXX For now we don't support priority inherited or priority protected
// XXX mutexes.
if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_PROTOCOL_MASK)
@@ -57,8 +52,18 @@

/* Copy the values from the attribute. */
mutex->__data.__kind = imutexattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS;

if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0)
mutex->__data.__kind |= PTHREAD_MUTEX_ROBUST_PRIVATE_NP;
{
#ifndef __ASSUME_SET_ROBUST_LIST
if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_PSHARED) != 0
&& __set_robust_list_avail < 0)
return ENOTSUP;
#endif

mutex->__data.__kind |= PTHREAD_MUTEX_ROBUST_NORMAL_NP;
}

switch ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_PROTOCOL_MASK)
>> PTHREAD_MUTEXATTR_PROTOCOL_SHIFT)
{
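
The practical effect of the hunk above: process-shared robust mutexes are now accepted whenever the kernel call is available, and ENOTSUP is returned only in its absence. An initialization sketch using the _np attribute interfaces of this era (error handling abbreviated):

#include <pthread.h>

int
init_robust_shared (pthread_mutex_t *m)
{
  pthread_mutexattr_t a;
  int e = pthread_mutexattr_init (&a);
  if (e != 0)
    return e;
  pthread_mutexattr_setpshared (&a, PTHREAD_PROCESS_SHARED);
  pthread_mutexattr_setrobust_np (&a, PTHREAD_MUTEX_ROBUST_NP);
  /* Fails with ENOTSUP on kernels lacking set_robust_list.  */
  e = pthread_mutex_init (m, &a);
  pthread_mutexattr_destroy (&a);
  return e;
}
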
(Diffs for the remaining 6 changed files are not shown.)
