Commit 542f417
---
yaml
---
r: 20129
b: refs/heads/master
c: 365bf8a
h: refs/heads/master
i:
  20127: 0974f84
v: v3
Nicolas Pitre authored and Russell King committed Feb 8, 2006
1 parent 8c4dd79 commit 542f417
Showing 2 changed files with 66 additions and 67 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 5964eae835c3b98c69d338950651f7f414f96477
refs/heads/master: 365bf8ac6f5b3d3187cb39444fa87a5b38683ff4
131 changes: 65 additions & 66 deletions trunk/include/asm-arm/mutex.h
@@ -23,72 +23,71 @@
* simply bail out immediately through the slow path where the lock will be
* reattempted until it succeeds.
*/
#define __mutex_fastpath_lock(count, fail_fn) \
do { \
int __ex_flag, __res; \
\
typecheck(atomic_t *, count); \
typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
\
__asm__ ( \
"ldrex %0, [%2] \n" \
"sub %0, %0, #1 \n" \
"strex %1, %0, [%2] \n" \
\
: "=&r" (__res), "=&r" (__ex_flag) \
: "r" (&(count)->counter) \
: "cc","memory" ); \
\
if (unlikely(__res || __ex_flag)) \
fail_fn(count); \
} while (0)

#define __mutex_fastpath_lock_retval(count, fail_fn) \
({ \
int __ex_flag, __res; \
\
typecheck(atomic_t *, count); \
typecheck_fn(fastcall int (*)(atomic_t *), fail_fn); \
\
__asm__ ( \
"ldrex %0, [%2] \n" \
"sub %0, %0, #1 \n" \
"strex %1, %0, [%2] \n" \
\
: "=&r" (__res), "=&r" (__ex_flag) \
: "r" (&(count)->counter) \
: "cc","memory" ); \
\
__res |= __ex_flag; \
if (unlikely(__res != 0)) \
__res = fail_fn(count); \
__res; \
})
static inline void
__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
int __ex_flag, __res;

__asm__ (

"ldrex %0, [%2] \n\t"
"sub %0, %0, #1 \n\t"
"strex %1, %0, [%2] "

: "=&r" (__res), "=&r" (__ex_flag)
: "r" (&(count)->counter)
: "cc","memory" );

__res |= __ex_flag;
if (unlikely(__res != 0))
fail_fn(count);
}

static inline int
__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
{
int __ex_flag, __res;

__asm__ (

"ldrex %0, [%2] \n\t"
"sub %0, %0, #1 \n\t"
"strex %1, %0, [%2] "

: "=&r" (__res), "=&r" (__ex_flag)
: "r" (&(count)->counter)
: "cc","memory" );

__res |= __ex_flag;
if (unlikely(__res != 0))
__res = fail_fn(count);
return __res;
}
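
For orientation, here is a rough sketch of the caller side, paraphrased from the generic mutex core in kernel/mutex.c of this era; it is not part of this diff, and decorations such as fastcall and __sched are omitted. The fail_fn passed into the fastpaths above is the generic slow path, which queues the task and keeps retrying until the lock is acquired.

void mutex_lock(struct mutex *lock)
{
	might_sleep();
	/* 1 -> 0 transition; on contention or a failed strex, take the slow path */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

int mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();
	/* same, but the slow path may return -EINTR if a signal arrives */
	return __mutex_fastpath_lock_retval(&lock->count,
					    __mutex_lock_interruptible_slowpath);
}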

/*
* Same trick is used for the unlock fast path. However the original value,
* rather than the result, is used to test for success in order to have
* better generated assembly.
*/
#define __mutex_fastpath_unlock(count, fail_fn) \
do { \
int __ex_flag, __res, __orig; \
\
typecheck(atomic_t *, count); \
typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
\
__asm__ ( \
"ldrex %0, [%3] \n" \
"add %1, %0, #1 \n" \
"strex %2, %1, [%3] \n" \
\
: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag) \
: "r" (&(count)->counter) \
: "cc","memory" ); \
\
if (unlikely(__orig || __ex_flag)) \
fail_fn(count); \
} while (0)
static inline void
__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
int __ex_flag, __res, __orig;

__asm__ (

"ldrex %0, [%3] \n\t"
"add %1, %0, #1 \n\t"
"strex %2, %1, [%3] "

: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
: "r" (&(count)->counter)
: "cc","memory" );

__orig |= __ex_flag;
if (unlikely(__orig != 0))
fail_fn(count);
}
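
To make the comment above concrete, here is a minimal C11 model of the decision the unlock fast path makes. It is not part of the patch; the name model_fastpath_unlock is hypothetical, and neither memory ordering nor the strex-failure case is modelled.

#include <stdatomic.h>

static inline void
model_fastpath_unlock(atomic_int *count, void (*fail_fn)(atomic_int *))
{
	/* atomic_fetch_add() returns the ORIGINAL value, like __orig above */
	int orig = atomic_fetch_add(count, 1);

	/*
	 * 0 -> 1 is the uncontended unlock.  Any other original value
	 * (negative means tasks may be queued on the mutex) takes the
	 * slow path so that a waiter can be woken up.
	 */
	if (orig != 0)
		fail_fn(count);
}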

/*
* If the unlock was done on a contended lock, or if the unlock simply fails
@@ -110,12 +109,12 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))

__asm__ (

"1: ldrex %0, [%3] \n"
"subs %1, %0, #1 \n"
"strexeq %2, %1, [%3] \n"
"movlt %0, #0 \n"
"cmpeq %2, #0 \n"
"bgt 1b \n"
"1: ldrex %0, [%3] \n\t"
"subs %1, %0, #1 \n\t"
"strexeq %2, %1, [%3] \n\t"
"movlt %0, #0 \n\t"
"cmpeq %2, #0 \n\t"
"bgt 1b "

: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
: "r" (&count->counter)
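
The trylock routine above (its tail is collapsed in this view) amounts to a conditional 1 -> 0 transition that only loops when the exclusive store is disturbed. A hedged, portable C11 model with a tiny usage example follows; model_fastpath_trylock and the sample main() are hypothetical and not part of the patch.

#include <stdatomic.h>
#include <stdio.h>

/* Succeeds (returns 1) only for the 1 -> 0 transition; never sleeps. */
static inline int
model_fastpath_trylock(atomic_int *count)
{
	int expected = 1;	/* 1 == unlocked */
	return atomic_compare_exchange_strong(count, &expected, 0);
}

int main(void)
{
	atomic_int count = 1;
	printf("first try:  %d\n", model_fastpath_trylock(&count));	/* 1: acquired */
	printf("second try: %d\n", model_fastpath_trylock(&count));	/* 0: already held */
	return 0;
}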
