Skip to content

Commit

Permalink
[SPARC64]: Non-atomic bitops do not need volatile operations
Browse files Browse the repository at this point in the history
Noticed this while comparing sparc64's bitops.h to ppc64's.
We can cast the volatile memory argument to be non-volatile.

While we're here, __inline__ --> inline.

Signed-off-by: David S. Miller <davem@davemloft.net>
  • Loading branch information
David S. Miller committed Jul 25, 2005
1 parent 4b50242 commit 6593eae
Showing 1 changed file with 28 additions and 28 deletions.
56 changes: 28 additions & 28 deletions include/asm-sparc64/bitops.h
Original file line number Diff line number Diff line change
Expand Up @@ -20,52 +20,52 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);

/* "non-atomic" versions... */

/*
 * Non-atomic set: turn on bit 'nr' in the bitmap at 'addr'.
 * Caller must guarantee exclusion; no locked/volatile access is
 * required, so the volatile qualifier is deliberately cast away.
 * Words are 64-bit, hence nr >> 6 selects the word and nr & 63 the bit.
 */
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);

	*m |= (1UL << (nr & 63));
}

/*
 * Non-atomic clear: turn off bit 'nr' in the bitmap at 'addr'.
 * Non-atomic, so the volatile qualifier is cast away (caller
 * serializes access).
 */
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);

	*m &= ~(1UL << (nr & 63));
}

/*
 * Non-atomic toggle: flip bit 'nr' in the bitmap at 'addr'.
 * Non-atomic, so the volatile qualifier is cast away (caller
 * serializes access).
 */
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);

	*m ^= (1UL << (nr & 63));
}

/*
 * Non-atomic test-and-set: set bit 'nr' and return its previous
 * value (non-zero if it was already set).  Uses unsigned arithmetic
 * for the word/mask and casts away volatile, as no atomicity is
 * promised here.
 */
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
	unsigned long old = *m;
	unsigned long mask = (1UL << (nr & 63));

	*m = (old | mask);
	return ((old & mask) != 0);
}

/*
 * Non-atomic test-and-clear: clear bit 'nr' and return its previous
 * value (non-zero if it was set).  Unsigned word/mask arithmetic;
 * volatile cast away since the caller guarantees exclusion.
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
	unsigned long old = *m;
	unsigned long mask = (1UL << (nr & 63));

	*m = (old & ~mask);
	return ((old & mask) != 0);
}

/*
 * Non-atomic test-and-toggle: flip bit 'nr' and return its previous
 * value (non-zero if it was set).  Unsigned word/mask arithmetic;
 * volatile cast away since the caller guarantees exclusion.
 */
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
	unsigned long old = *m;
	unsigned long mask = (1UL << (nr & 63));

	*m = (old ^ mask);
	return ((old & mask) != 0);
}
Expand All @@ -79,13 +79,13 @@ static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr
#define smp_mb__after_clear_bit() barrier()
#endif

/*
 * Return non-zero if bit 'nr' is set in the bitmap at 'addr'.
 * Pure read; the 64-bit word is indexed by nr >> 6 and the bit
 * within it by nr & 63.
 */
static inline int test_bit(int nr, __const__ volatile unsigned long *addr)
{
	return (1UL & (addr[nr >> 6] >> (nr & 63))) != 0UL;
}

/* The easy/cheese version for now. */
static __inline__ unsigned long ffz(unsigned long word)
static inline unsigned long ffz(unsigned long word)
{
unsigned long result;

Expand All @@ -103,7 +103,7 @@ static __inline__ unsigned long ffz(unsigned long word)
*
* Undefined if no bit exists, so code should check against 0 first.
*/
static __inline__ unsigned long __ffs(unsigned long word)
static inline unsigned long __ffs(unsigned long word)
{
unsigned long result = 0;

Expand Down Expand Up @@ -144,7 +144,7 @@ static inline int sched_find_first_bit(unsigned long *b)
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
static __inline__ int ffs(int x)
static inline int ffs(int x)
{
if (!x)
return 0;
Expand All @@ -158,31 +158,31 @@ static __inline__ int ffs(int x)

#ifdef ULTRA_HAS_POPULATION_COUNT

/*
 * Population count (number of set bits) of a 64-bit word, using the
 * UltraSPARC 'popc' instruction.  Only compiled when
 * ULTRA_HAS_POPULATION_COUNT is defined, since not all sparc64 CPUs
 * implement popc in hardware.
 */
static inline unsigned int hweight64(unsigned long w)
{
	unsigned int res;

	__asm__ ("popc %1,%0" : "=r" (res) : "r" (w));
	return res;
}

/*
 * Population count of the low 32 bits of 'w' via the UltraSPARC
 * 'popc' instruction.  The mask confines the count to 32 bits since
 * popc operates on the full 64-bit register.
 */
static inline unsigned int hweight32(unsigned int w)
{
	unsigned int res;

	__asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffffffff));
	return res;
}

/*
 * Population count of the low 16 bits of 'w' via the UltraSPARC
 * 'popc' instruction; the 0xffff mask discards the upper bits before
 * counting.
 */
static inline unsigned int hweight16(unsigned int w)
{
	unsigned int res;

	__asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffff));
	return res;
}

static __inline__ unsigned int hweight8(unsigned int w)
static inline unsigned int hweight8(unsigned int w)
{
unsigned int res;

Expand Down Expand Up @@ -236,7 +236,7 @@ extern unsigned long find_next_zero_bit(const unsigned long *,
#define test_and_clear_le_bit(nr,addr) \
test_and_clear_bit((nr) ^ 0x38, (addr))

static __inline__ int test_le_bit(int nr, __const__ unsigned long * addr)
static inline int test_le_bit(int nr, __const__ unsigned long * addr)
{
int mask;
__const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
Expand Down

0 comments on commit 6593eae

Please sign in to comment.