---
r: 36303
b: refs/heads/master
c: 781125c
h: refs/heads/master
i:
  36301: 7a6da99
  36299: 47ef910
  36295: cf90f20
  36287: f5b8816
v: v3
Paul Mundt committed Sep 27, 2006
1 parent 11873e0 commit 477f105
Showing 2 changed files with 97 additions and 10 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 15f57a29a19ad0dbb468363cb617b06f71f6de92
refs/heads/master: 781125ca58dfbd47635cfc0e408f1f9d7e10b227
105 changes: 96 additions & 9 deletions trunk/include/asm-sh/atomic.h
@@ -22,49 +22,110 @@ typedef struct { volatile int counter; } atomic_t;
* forward to code at the end of this object's .text section, then
* branch back to restart the operation.
*/

static __inline__ void atomic_add(int i, atomic_t * v)
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
unsigned long tmp;

__asm__ __volatile__ (
"1: movli.l @%3, %0 ! atomic_add \n"
" add %2, %0 \n"
" movco.l %0, @%3 \n"
" bf 1b \n"
: "=&z" (tmp), "=r" (&v->counter)
: "r" (i), "r" (&v->counter)
: "t");
#else
unsigned long flags;

local_irq_save(flags);
*(long *)v += i;
local_irq_restore(flags);
#endif
}

static __inline__ void atomic_sub(int i, atomic_t *v)
static inline void atomic_sub(int i, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
unsigned long tmp;

__asm__ __volatile__ (
"1: movli.l @%3, %0 ! atomic_sub \n"
" sub %2, %0 \n"
" movco.l %0, @%3 \n"
" bf 1b \n"
: "=&z" (tmp), "=r" (&v->counter)
: "r" (i), "r" (&v->counter)
: "t");
#else
unsigned long flags;

local_irq_save(flags);
*(long *)v -= i;
local_irq_restore(flags);
#endif
}

static __inline__ int atomic_add_return(int i, atomic_t * v)
/*
* SH-4A note:
*
* We basically get atomic_xxx_return() for free compared with
* atomic_xxx(). movli.l/movco.l require r0 due to the instruction
* encoding, so the retval is automatically set without having to
* do any special work.
*/
static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long temp, flags;
unsigned long temp;

#ifdef CONFIG_CPU_SH4A
__asm__ __volatile__ (
"1: movli.l @%3, %0 ! atomic_add_return \n"
" add %2, %0 \n"
" movco.l %0, @%3 \n"
" bf 1b \n"
" synco \n"
: "=&z" (temp), "=r" (&v->counter)
: "r" (i), "r" (&v->counter)
: "t");
#else
unsigned long flags;

local_irq_save(flags);
temp = *(long *)v;
temp += i;
*(long *)v = temp;
local_irq_restore(flags);
#endif

return temp;
}

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)

static __inline__ int atomic_sub_return(int i, atomic_t * v)
static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned long temp, flags;
unsigned long temp;

#ifdef CONFIG_CPU_SH4A
__asm__ __volatile__ (
"1: movli.l @%3, %0 ! atomic_sub_return \n"
" sub %2, %0 \n"
" movco.l %0, @%3 \n"
" bf 1b \n"
" synco \n"
: "=&z" (temp), "=r" (&v->counter)
: "r" (i), "r" (&v->counter)
: "t");
#else
unsigned long flags;

local_irq_save(flags);
temp = *(long *)v;
temp -= i;
*(long *)v = temp;
local_irq_restore(flags);
#endif

return temp;
}
@@ -119,22 +180,48 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
unsigned long tmp;

__asm__ __volatile__ (
"1: movli.l @%3, %0 ! atomic_clear_mask \n"
" and %2, %0 \n"
" movco.l %0, @%3 \n"
" bf 1b \n"
: "=&z" (tmp), "=r" (&v->counter)
: "r" (~mask), "r" (&v->counter)
: "t");
#else
unsigned long flags;

local_irq_save(flags);
*(long *)v &= ~mask;
local_irq_restore(flags);
#endif
}

static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
unsigned long tmp;

__asm__ __volatile__ (
"1: movli.l @%3, %0 ! atomic_set_mask \n"
" or %2, %0 \n"
" movco.l %0, @%3 \n"
" bf 1b \n"
: "=&z" (tmp), "=r" (&v->counter)
: "r" (mask), "r" (&v->counter)
: "t");
#else
unsigned long flags;

local_irq_save(flags);
*(long *)v |= mask;
local_irq_restore(flags);
#endif
}

/* Atomic operations are already serializing on SH */
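
A note for readers who don't work with SH assembly (illustrative, not part of this commit): the movli.l/movco.l pairs above form a load-linked/store-conditional retry loop. movli.l loads the counter into r0 and arms a reservation on it, movco.l stores the updated value only if the reservation is still intact and records success in the T bit, and "bf 1b" branches back to retry when the store was lost to a competing writer. A minimal sketch of the same retry shape in portable C11, using invented demo_* names rather than the kernel API:

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for atomic_t; not the kernel's definition. */
typedef struct { atomic_int counter; } demo_atomic_t;

/*
 * Same shape as the movli.l/movco.l loop: load the current value,
 * compute the new one, and attempt a conditional store.  A failed
 * store means another CPU changed the word in the meantime, so we
 * start over -- the analogue of "bf 1b" branching back to label 1.
 */
static int demo_add_return(int i, demo_atomic_t *v)
{
	int old = atomic_load_explicit(&v->counter, memory_order_relaxed);
	int new;

	do {
		new = old + i;
		/* On failure, 'old' is refreshed with the current value. */
	} while (!atomic_compare_exchange_weak_explicit(&v->counter, &old, new,
							memory_order_seq_cst,
							memory_order_relaxed));
	return new;
}

int main(void)
{
	demo_atomic_t v = { 0 };

	printf("%d\n", demo_add_return(3, &v));	/* prints 3 */
	return 0;
}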
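
Similarly (illustrative, not part of this commit): the second hunk starts inside atomic_add_unless(), whose contract is to add a to the counter unless it already equals u and report whether the add happened; atomic_inc_not_zero() builds the usual "take a reference only if the object is still live" pattern on top of it. A hedged C11 sketch of that contract, again with invented names; the SH body itself is not shown in this diff:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Add 'a' to *v unless *v equals 'u'; return true when the add was
 * performed.  This mirrors the atomic_add_unless() contract named in
 * the hunk header above.
 */
static bool demo_add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load_explicit(v, memory_order_relaxed);

	while (c != u) {
		if (atomic_compare_exchange_weak_explicit(v, &c, c + a,
							  memory_order_seq_cst,
							  memory_order_relaxed))
			return true;
		/* 'c' now holds the refreshed value; re-check against 'u'. */
	}
	return false;
}

#define demo_inc_not_zero(v)	demo_add_unless((v), 1, 0)

int main(void)
{
	atomic_int refs = 1;

	printf("%d\n", demo_inc_not_zero(&refs));	/* 1: still live, ref taken */
	atomic_store(&refs, 0);
	printf("%d\n", demo_inc_not_zero(&refs));	/* 0: already dead */
	return 0;
}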

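Finally (illustrative, not part of this commit): atomic_clear_mask() and atomic_set_mask() are an atomic AND with ~mask and an atomic OR with mask; the SH-4A paths express this as the same movli.l/movco.l loop with an and/or in the middle. An equivalent of the semantics in portable C11, names invented for the example:

#include <stdatomic.h>
#include <stdio.h>

/* Atomically clear the bits in 'mask', as atomic_clear_mask() does. */
static void demo_clear_mask(unsigned int mask, atomic_uint *v)
{
	atomic_fetch_and_explicit(v, ~mask, memory_order_seq_cst);
}

/* Atomically set the bits in 'mask', as atomic_set_mask() does. */
static void demo_set_mask(unsigned int mask, atomic_uint *v)
{
	atomic_fetch_or_explicit(v, mask, memory_order_seq_cst);
}

int main(void)
{
	atomic_uint flags = 0xf0;

	demo_set_mask(0x0f, &flags);	/* flags == 0xff */
	demo_clear_mask(0xf0, &flags);	/* flags == 0x0f */
	printf("0x%02x\n", atomic_load(&flags));
	return 0;
}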