asm-generic/atomic.h: allow SMP peeps to leverage this
Only a few core functions need to be implemented for SMP systems, so allow
the arches to override them while getting the rest for free.

At least, this is enough to allow the Blackfin SMP port to use this header (see the sketch below).

Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Cc: Arun Sharma <asharma@fb.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
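
For illustration only -- this is not code from the actual Blackfin port -- an SMP arch could satisfy the new guard by supplying the four core operations itself and then including the generic header for the rest. Every name prefixed __foo_ below is a hypothetical placeholder for a real arch primitive:

/* Hypothetical arch/foo/include/asm/atomic.h -- a sketch, not real code. */
#ifndef __ARCH_FOO_ATOMIC_H
#define __ARCH_FOO_ATOMIC_H

#include <linux/types.h>	/* atomic_t */

/* Assumed SMP-safe hardware primitives, provided elsewhere by the arch. */
extern int __foo_hw_add_return(int i, volatile int *p);
extern void __foo_hw_and(unsigned long mask, volatile int *p);
extern void __foo_hw_or(unsigned long mask, volatile int *p);

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __foo_hw_add_return(i, &v->counter);
}
#define atomic_add_return atomic_add_return

static inline int atomic_sub_return(int i, atomic_t *v)
{
	return __foo_hw_add_return(-i, &v->counter);
}
#define atomic_sub_return atomic_sub_return

static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	__foo_hw_and(~mask, &v->counter);	/* clear the bits set in mask */
}
#define atomic_clear_mask atomic_clear_mask

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	__foo_hw_or(mask, &v->counter);		/* set the bits set in mask */
}
#define atomic_set_mask atomic_set_mask

/* All four core ops are defined, so the #error in the generic header is
 * skipped and atomic_read(), atomic_add(), atomic_inc(), ... come for free. */
#include <asm-generic/atomic.h>

#endif /* __ARCH_FOO_ATOMIC_H */

Defining the macro alongside the inline function (e.g. #define atomic_add_return atomic_add_return) is the usual kernel idiom for making the generic header's #ifndef guards skip their fallback versions.
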
Mike Frysinger authored and Linus Torvalds committed Jul 26, 2011
1 parent 00b3c28 commit 7505cb6
 include/asm-generic/atomic.h | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)
@@ -16,7 +16,11 @@
 #define __ASM_GENERIC_ATOMIC_H
 
 #ifdef CONFIG_SMP
-#error not SMP safe
+/* Force people to define core atomics */
+# if !defined(atomic_add_return) || !defined(atomic_sub_return) || \
+     !defined(atomic_clear_mask) || !defined(atomic_set_mask)
+#  error "SMP requires a little arch-specific magic"
+# endif
 #endif
 
 /*
@@ -34,7 +38,9 @@
  *
  * Atomically reads the value of @v.
  */
+#ifndef atomic_read
 #define atomic_read(v)	(*(volatile int *)&(v)->counter)
+#endif
 
 /**
  * atomic_set - set atomic variable
@@ -55,6 +61,7 @@
  *
  * Atomically adds @i to @v and returns the result
  */
+#ifndef atomic_add_return
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	unsigned long flags;
@@ -68,6 +75,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
 
 	return temp;
 }
+#endif
 
 /**
  * atomic_sub_return - subtract integer from atomic variable
@@ -76,6 +84,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
  *
  * Atomically subtracts @i from @v and returns the result
  */
+#ifndef atomic_sub_return
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
 	unsigned long flags;
@@ -89,6 +98,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 
 	return temp;
 }
+#endif
 
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
@@ -147,6 +157,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
  *
  * Atomically clears the bits set in @mask from @v
  */
+#ifndef atomic_clear_mask
 static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
 {
 	unsigned long flags;
@@ -156,6 +167,7 @@ static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
 	v->counter &= mask;
 	raw_local_irq_restore(flags);
 }
+#endif
 
 /**
  * atomic_set_mask - Atomically set bits in atomic variable
@@ -164,6 +176,7 @@ static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
  *
  * Atomically sets the bits set in @mask in @v
  */
+#ifndef atomic_set_mask
 static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
 	unsigned long flags;
@@ -172,6 +185,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 	v->counter |= mask;
 	raw_local_irq_restore(flags);
 }
+#endif
 
 /* Assume that atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()	barrier()
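
Why the old #error existed at all: the generic fallbacks serialize only by masking interrupts on the local CPU (raw_local_irq_save/restore), which does nothing to stop another CPU from racing into the same critical section. As a minimal sketch -- using GCC's portable __sync builtin rather than any real arch's instructions -- an SMP-safe core op might look like:

/* Sketch only: an SMP-safe atomic_add_return built on the GCC
 * __sync_add_and_fetch builtin, which emits a real atomic
 * read-modify-write that other CPUs must observe. */
static inline int atomic_add_return(int i, atomic_t *v)
{
	return __sync_add_and_fetch(&v->counter, i);
}
#define atomic_add_return atomic_add_return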