---
r: 44315
b: refs/heads/master
c: ec723fb
h: refs/heads/master
i:
  44313: f021a22
  44311: 61e224f
v: v3
Paul Mundt committed Dec 11, 2006
1 parent 919d9ff commit 6aff5f6
Showing 4 changed files with 181 additions and 152 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: a45e724ba07c02bcf3da96ddc4efefbfe10957f5
+refs/heads/master: ec723fbe7e19f5a66cea183bca7ca20675631a7a
71 changes: 71 additions & 0 deletions trunk/include/asm-sh/atomic-irq.h
@@ -0,0 +1,71 @@
#ifndef __ASM_SH_ATOMIC_IRQ_H
#define __ASM_SH_ATOMIC_IRQ_H

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
static inline void atomic_add(int i, atomic_t *v)
{
        unsigned long flags;

        local_irq_save(flags);
        *(long *)v += i;
        local_irq_restore(flags);
}

static inline void atomic_sub(int i, atomic_t *v)
{
        unsigned long flags;

        local_irq_save(flags);
        *(long *)v -= i;
        local_irq_restore(flags);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long temp, flags;

        local_irq_save(flags);
        temp = *(long *)v;
        temp += i;
        *(long *)v = temp;
        local_irq_restore(flags);

        return temp;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long temp, flags;

        local_irq_save(flags);
        temp = *(long *)v;
        temp -= i;
        *(long *)v = temp;
        local_irq_restore(flags);

        return temp;
}

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
        unsigned long flags;

        local_irq_save(flags);
        *(long *)v &= ~mask;
        local_irq_restore(flags);
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
        unsigned long flags;

        local_irq_save(flags);
        *(long *)v |= mask;
        local_irq_restore(flags);
}

#endif /* __ASM_SH_ATOMIC_IRQ_H */
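A note on why the interrupt-disabling fallback above is sufficient: it serves the pre-SH-4A parts, which run uniprocessor kernels, so the only concurrent access to v->counter comes from interrupt handlers on the same CPU, and masking local interrupts around the read-modify-write makes it atomic. The sketch below (illustration only, not part of the commit; atomic_t as declared in asm-sh/atomic.h) shows the lost-update race that the local_irq_save()/local_irq_restore() pair closes:

/* Illustration only: an unprotected read-modify-write. */
typedef struct { volatile int counter; } atomic_t;

static inline void atomic_add_unprotected(int i, atomic_t *v)
{
        long tmp = *(long *)v;  /* load the counter                      */
                                /* an interrupt handler that runs here
                                 * and updates v->counter has its update */
        *(long *)v = tmp + i;   /* ...silently overwritten by this store */
}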
107 changes: 107 additions & 0 deletions trunk/include/asm-sh/atomic-llsc.h
@@ -0,0 +1,107 @@
#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
static inline void atomic_add(int i, atomic_t *v)
{
        unsigned long tmp;

        __asm__ __volatile__ (
"1:     movli.l @%2, %0         ! atomic_add    \n"
"       add     %1, %0                          \n"
"       movco.l %0, @%2                         \n"
"       bf      1b                              \n"
        : "=&z" (tmp)
        : "r" (i), "r" (&v->counter)
        : "t");
}

static inline void atomic_sub(int i, atomic_t *v)
{
        unsigned long tmp;

        __asm__ __volatile__ (
"1:     movli.l @%2, %0         ! atomic_sub    \n"
"       sub     %1, %0                          \n"
"       movco.l %0, @%2                         \n"
"       bf      1b                              \n"
        : "=&z" (tmp)
        : "r" (i), "r" (&v->counter)
        : "t");
}

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long temp;

        __asm__ __volatile__ (
"1:     movli.l @%2, %0         ! atomic_add_return     \n"
"       add     %1, %0                                  \n"
"       movco.l %0, @%2                                 \n"
"       bf      1b                                      \n"
"       synco                                           \n"
        : "=&z" (temp)
        : "r" (i), "r" (&v->counter)
        : "t");

        return temp;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long temp;

        __asm__ __volatile__ (
"1:     movli.l @%2, %0         ! atomic_sub_return     \n"
"       sub     %1, %0                                  \n"
"       movco.l %0, @%2                                 \n"
"       bf      1b                                      \n"
"       synco                                           \n"
        : "=&z" (temp)
        : "r" (i), "r" (&v->counter)
        : "t");

        return temp;
}

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
        unsigned long tmp;

        __asm__ __volatile__ (
"1:     movli.l @%2, %0         ! atomic_clear_mask     \n"
"       and     %1, %0                                  \n"
"       movco.l %0, @%2                                 \n"
"       bf      1b                                      \n"
        : "=&z" (tmp)
        : "r" (~mask), "r" (&v->counter)
        : "t");
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
        unsigned long tmp;

        __asm__ __volatile__ (
"1:     movli.l @%2, %0         ! atomic_set_mask       \n"
"       or      %1, %0                                  \n"
"       movco.l %0, @%2                                 \n"
"       bf      1b                                      \n"
        : "=&z" (tmp)
        : "r" (mask), "r" (&v->counter)
        : "t");
}

#endif /* __ASM_SH_ATOMIC_LLSC_H */
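For readers unfamiliar with the SH-4A primitives: movli.l/movco.l form a load-locked/store-conditional pair. movli.l loads the word and takes a reservation; movco.l stores only if the reservation is still intact, recording success in the T bit, so "bf 1b" (branch if false) retries the whole sequence on contention. The synco in the _return variants is a synchronization barrier. A rough C analogy, sketched here with GCC's __sync_bool_compare_and_swap builtin standing in for the hardware reservation (an assumption for illustration, not code from the commit):

/* Sketch only: the retry loop of atomic_add_return(), CAS edition. */
static inline int atomic_add_return_sketch(int i, atomic_t *v)
{
        int old, new;

        do {
                old = v->counter;       /* movli.l @%2, %0 (load + reserve) */
                new = old + i;          /* add     %1, %0                   */
        } while (!__sync_bool_compare_and_swap(&v->counter, old, new));
                                        /* movco.l %0, @%2 ; bf 1b          */
        return new;     /* result already in r0, per the SH-4A note above  */
}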
153 changes: 2 additions & 151 deletions trunk/include/asm-sh/atomic.h
@@ -17,119 +17,14 @@ typedef struct { volatile int counter; } atomic_t;
 #include <linux/compiler.h>
 #include <asm/system.h>
 
-/*
- * To get proper branch prediction for the main line, we must branch
- * forward to code at the end of this object's .text section, then
- * branch back to restart the operation.
- */
-static inline void atomic_add(int i, atomic_t *v)
-{
 #ifdef CONFIG_CPU_SH4A
-        unsigned long tmp;
-
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0         ! atomic_add    \n"
-"       add     %1, %0                          \n"
-"       movco.l %0, @%2                         \n"
-"       bf      1b                              \n"
-        : "=&z" (tmp)
-        : "r" (i), "r" (&v->counter)
-        : "t");
+#include <asm/atomic-llsc.h>
 #else
-        unsigned long flags;
-
-        local_irq_save(flags);
-        *(long *)v += i;
-        local_irq_restore(flags);
-#endif
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
-        unsigned long tmp;
-
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0         ! atomic_sub    \n"
-"       sub     %1, %0                          \n"
-"       movco.l %0, @%2                         \n"
-"       bf      1b                              \n"
-        : "=&z" (tmp)
-        : "r" (i), "r" (&v->counter)
-        : "t");
-#else
-        unsigned long flags;
-
-        local_irq_save(flags);
-        *(long *)v -= i;
-        local_irq_restore(flags);
+#include <asm/atomic-irq.h>
 #endif
-}
-
-/*
- * SH-4A note:
- *
- * We basically get atomic_xxx_return() for free compared with
- * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
- * encoding, so the retval is automatically set without having to
- * do any special work.
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-        unsigned long temp;
-
-#ifdef CONFIG_CPU_SH4A
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0         ! atomic_add_return     \n"
-"       add     %1, %0                                  \n"
-"       movco.l %0, @%2                                 \n"
-"       bf      1b                                      \n"
-"       synco                                           \n"
-        : "=&z" (temp)
-        : "r" (i), "r" (&v->counter)
-        : "t");
-#else
-        unsigned long flags;
-
-        local_irq_save(flags);
-        temp = *(long *)v;
-        temp += i;
-        *(long *)v = temp;
-        local_irq_restore(flags);
-#endif
-
-        return temp;
-}
-
 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
 
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-        unsigned long temp;
-
-#ifdef CONFIG_CPU_SH4A
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0         ! atomic_sub_return     \n"
-"       sub     %1, %0                                  \n"
-"       movco.l %0, @%2                                 \n"
-"       bf      1b                                      \n"
-"       synco                                           \n"
-        : "=&z" (temp)
-        : "r" (i), "r" (&v->counter)
-        : "t");
-#else
-        unsigned long flags;
-
-        local_irq_save(flags);
-        temp = *(long *)v;
-        temp -= i;
-        *(long *)v = temp;
-        local_irq_restore(flags);
-#endif
-
-        return temp;
-}
-
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
 #define atomic_inc_return(v) atomic_add_return(1,(v))
 
@@ -180,50 +75,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 }
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
-        unsigned long tmp;
-
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0         ! atomic_clear_mask     \n"
-"       and     %1, %0                                  \n"
-"       movco.l %0, @%2                                 \n"
-"       bf      1b                                      \n"
-        : "=&z" (tmp)
-        : "r" (~mask), "r" (&v->counter)
-        : "t");
-#else
-        unsigned long flags;
-
-        local_irq_save(flags);
-        *(long *)v &= ~mask;
-        local_irq_restore(flags);
-#endif
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
-        unsigned long tmp;
-
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0         ! atomic_set_mask       \n"
-"       or      %1, %0                                  \n"
-"       movco.l %0, @%2                                 \n"
-"       bf      1b                                      \n"
-        : "=&z" (tmp)
-        : "r" (mask), "r" (&v->counter)
-        : "t");
-#else
-        unsigned long flags;
-
-        local_irq_save(flags);
-        *(long *)v |= mask;
-        local_irq_restore(flags);
-#endif
-}
-
 /* Atomic operations are already serializing on SH */
 #define smp_mb__before_atomic_dec()     barrier()
 #define smp_mb__after_atomic_dec()      barrier()
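Net effect on atomic.h: the per-function #ifdef CONFIG_CPU_SH4A blocks are deleted, and the two surviving additions select a backend header once. Pieced together from the hunks above, the dispatch in the new atomic.h reads:

#ifdef CONFIG_CPU_SH4A
#include <asm/atomic-llsc.h>    /* SH-4A: movli.l/movco.l LL/SC ops */
#else
#include <asm/atomic-irq.h>     /* all other parts: IRQ-disable ops */
#endif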
