Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 72352
b: refs/heads/master
c: 418ccbe
h: refs/heads/master
v: v3
  • Loading branch information
Nick Piggin authored and Thomas Gleixner committed Oct 23, 2007
1 parent fcad607 commit 681f20f
Show file tree
Hide file tree
Showing 3 changed files with 84 additions and 3 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: ea5806559f92a3e7439bc7a4f2c0d04692e68931
refs/heads/master: 418ccbe37f70f5021c4cd1cdcb0ce7f98d05f2dd
43 changes: 42 additions & 1 deletion trunk/include/asm-x86/bitops_32.h
Original file line number Diff line number Diff line change
Expand Up @@ -80,13 +80,46 @@ static inline void clear_bit(int nr, volatile unsigned long * addr)
:"Ir" (nr));
}

/**
 * clear_bit_unlock - clear a bit in memory with release semantics
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and implies release semantics before the memory
 * operation. It can be used for an unlock.
 *
 * barrier() here is a compiler-only barrier: it stops the compiler from
 * sinking critical-section accesses below the bit clear.  No CPU fence is
 * emitted -- NOTE(review): this relies on the atomic clear_bit() providing
 * sufficient ordering on x86 (locked RMW); confirm against clear_bit()'s
 * definition earlier in this file.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
barrier();
clear_bit(nr, addr);
}

/*
 * __clear_bit - clear a bit in memory (non-atomic variant)
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * The btrl instruction here carries no lock prefix, so the read-modify-write
 * is not atomic with respect to other CPUs; callers must guarantee exclusive
 * access to the containing word.
 * NOTE(review): ADDR is presumably a macro expanding to *addr, defined
 * earlier in this header -- confirm.
 */
static inline void __clear_bit(int nr, volatile unsigned long * addr)
{
__asm__ __volatile__(
"btrl %1,%0"
:"+m" (ADDR)
:"Ir" (nr));
}

/**
 * __clear_bit_unlock - non-atomically clear a bit, with release semantics
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 *
 * barrier() is a compiler-only barrier preventing the compiler from moving
 * critical-section accesses past the clearing store -- NOTE(review):
 * barrier() semantics assumed from compiler.h; confirm.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
barrier();
__clear_bit(nr, addr);
}

#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()

Expand Down Expand Up @@ -145,6 +178,15 @@ static inline int test_and_set_bit(int nr, volatile unsigned long * addr)
return oldbit;
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86
 *
 * NOTE(review): no extra acquire fence is added -- presumably the
 * lock-prefixed RMW in test_and_set_bit() already provides the required
 * ordering on x86; confirm against the Intel SDM.
 */
#define test_and_set_bit_lock test_and_set_bit

/**
* __test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
Expand Down Expand Up @@ -406,7 +448,6 @@ static inline int fls(int x)
}

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

#endif /* __KERNEL__ */

Expand Down
42 changes: 41 additions & 1 deletion trunk/include/asm-x86/bitops_64.h
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,20 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
:"dIr" (nr));
}

/**
 * clear_bit_unlock - clear a bit in memory with release semantics
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and implies release semantics before the memory
 * operation. It can be used for an unlock.
 *
 * barrier() is a compiler-only barrier keeping critical-section accesses
 * from being sunk below the bit clear; the atomic clear_bit() supplies the
 * hardware ordering.  NOTE(review): this function uses `static inline`
 * while the rest of this file uses `static __inline__` -- consider
 * harmonizing.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
barrier();
clear_bit(nr, addr);
}

static __inline__ void __clear_bit(int nr, volatile void * addr)
{
__asm__ __volatile__(
Expand All @@ -80,6 +94,24 @@ static __inline__ void __clear_bit(int nr, volatile void * addr)
:"dIr" (nr));
}

/**
 * __clear_bit_unlock - non-atomically clear a bit, with release semantics
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 *
 * barrier() is a compiler-only barrier preventing the compiler from moving
 * critical-section accesses past the clearing store.  NOTE(review): uses
 * `static inline` while the rest of this file uses `static __inline__` --
 * consider harmonizing.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
barrier();
__clear_bit(nr, addr);
}

#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()

Expand Down Expand Up @@ -136,6 +168,15 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
return oldbit;
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86
 *
 * NOTE(review): no extra acquire fence is added -- presumably the
 * lock-prefixed RMW in test_and_set_bit() already provides the required
 * ordering on x86; confirm against the Intel SDM.
 */
#define test_and_set_bit_lock test_and_set_bit

/**
* __test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
Expand Down Expand Up @@ -412,7 +453,6 @@ static __inline__ int fls(int x)
#define ARCH_HAS_FAST_MULTIPLIER 1

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

#endif /* __KERNEL__ */

Expand Down

0 comments on commit 681f20f

Please sign in to comment.