Commit 26dfdc4
---
r: 360118
b: refs/heads/master
c: 14e968b
h: refs/heads/master
v: v3
Vineet Gupta committed Feb 11, 2013
1 parent 74652ba commit 26dfdc4
Showing 6 changed files with 968 additions and 1 deletion.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: ac4c244d4e5d914f9a5642cdcc03b18780e55dbc
+refs/heads/master: 14e968bad788de922a755a84b92cb29f8c1342e4
232 changes: 232 additions & 0 deletions trunk/arch/arc/include/asm/atomic.h
@@ -0,0 +1,232 @@
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifdef __KERNEL__

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define atomic_read(v) ((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) (((v)->counter) = (i))

static inline void atomic_add(int i, atomic_t *v)
{
        unsigned int temp;

        __asm__ __volatile__(
        "1:     llock   %0, [%1]        \n"
        "       add     %0, %0, %2      \n"
        "       scond   %0, [%1]        \n"
        "       bnz     1b              \n"
        : "=&r"(temp)   /* Early clobber, to prevent reg reuse */
        : "r"(&v->counter), "ir"(i)
        : "cc");
}

static inline void atomic_sub(int i, atomic_t *v)
{
        unsigned int temp;

        __asm__ __volatile__(
        "1:     llock   %0, [%1]        \n"
        "       sub     %0, %0, %2      \n"
        "       scond   %0, [%1]        \n"
        "       bnz     1b              \n"
        : "=&r"(temp)
        : "r"(&v->counter), "ir"(i)
        : "cc");
}

/* add and also return the new value */
static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned int temp;

        __asm__ __volatile__(
        "1:     llock   %0, [%1]        \n"
        "       add     %0, %0, %2      \n"
        "       scond   %0, [%1]        \n"
        "       bnz     1b              \n"
        : "=&r"(temp)
        : "r"(&v->counter), "ir"(i)
        : "cc");

        return temp;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned int temp;

        __asm__ __volatile__(
        "1:     llock   %0, [%1]        \n"
        "       sub     %0, %0, %2      \n"
        "       scond   %0, [%1]        \n"
        "       bnz     1b              \n"
        : "=&r"(temp)
        : "r"(&v->counter), "ir"(i)
        : "cc");

        return temp;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned int temp;

        __asm__ __volatile__(
        "1:     llock   %0, [%1]        \n"
        "       bic     %0, %0, %2      \n"
        "       scond   %0, [%1]        \n"
        "       bnz     1b              \n"
        : "=&r"(temp)
        : "r"(addr), "ir"(mask)
        : "cc");
}

#else /* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

/* violating atomic_xxx API locking protocol in UP for optimization sake */
#define atomic_set(v, i) (((v)->counter) = (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
        /*
         * Independent of hardware support, all of the atomic_xxx() APIs need
         * to follow the same locking rules to make sure that a "hardware"
         * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
         * sequence
         *
         * Thus atomic_set() despite being 1 insn (and seemingly atomic)
         * requires the locking.
         */
        unsigned long flags;

        atomic_ops_lock(flags);
        v->counter = i;
        atomic_ops_unlock(flags);
}
#endif

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

static inline void atomic_add(int i, atomic_t *v)
{
        unsigned long flags;

        atomic_ops_lock(flags);
        v->counter += i;
        atomic_ops_unlock(flags);
}

static inline void atomic_sub(int i, atomic_t *v)
{
        unsigned long flags;

        atomic_ops_lock(flags);
        v->counter -= i;
        atomic_ops_unlock(flags);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long flags;
        unsigned long temp;

        atomic_ops_lock(flags);
        temp = v->counter;
        temp += i;
        v->counter = temp;
        atomic_ops_unlock(flags);

        return temp;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long flags;
        unsigned long temp;

        atomic_ops_lock(flags);
        temp = v->counter;
        temp -= i;
        v->counter = temp;
        atomic_ops_unlock(flags);

        return temp;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned long flags;

        atomic_ops_lock(flags);
        *addr &= ~mask;
        atomic_ops_unlock(flags);
}

#endif /* !CONFIG_ARC_HAS_LLSC */

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)                                    \
({                                                                      \
        int c, old;                                                     \
        c = atomic_read(v);                                             \
        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
                c = old;                                                \
        c;                                                              \
})

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_inc(v) atomic_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)

#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v) atomic_add_return(1, (v))
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i) { (i) }

#include <asm-generic/atomic64.h>

#endif /* !__ASSEMBLY__ */

#endif /* __KERNEL__ */

#endif /* _ASM_ARC_ATOMIC_H */
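
For readers unfamiliar with ARC's LLOCK/SCOND instructions: each retry loop above is equivalent in effect to a compare-and-swap loop. Below is a minimal sketch of the same idea using the kernel's atomic_cmpxchg() (pulled in above via asm/cmpxchg.h); the name atomic_add_sketch is illustrative and not part of this commit.

/*
 * Sketch: what the LLOCK/SCOND loop in atomic_add() achieves, expressed
 * with atomic_cmpxchg(). If another CPU updates v->counter between the
 * load and the store, the cmpxchg fails (returns a value != old) and we
 * retry with the freshly observed value.
 */
static inline void atomic_add_sketch(int i, atomic_t *v)
{
        int old, cur;

        cur = atomic_read(v);
        for (;;) {
                old = cur;
                cur = atomic_cmpxchg(v, old, old + i);
                if (cur == old)         /* no one raced with us: done */
                        break;
        }
}

The hardware version avoids the explicit compare: SCOND simply fails if the reservation established by LLOCK was lost in the meantime, which is why the loops above need only the BNZ retry branch.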
42 changes: 42 additions & 0 deletions trunk/arch/arc/include/asm/barrier.h
@@ -0,0 +1,42 @@
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#ifndef __ASSEMBLY__

/* TODO-vineetg: Need to see what this does, don't we need sync anywhere */
#define mb() __asm__ __volatile__ ("" : : : "memory")
#define rmb() mb()
#define wmb() mb()
#define set_mb(var, value) do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define read_barrier_depends() mb()

/* TODO-vineetg verify the correctness of macros here */
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#endif

#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()

#define smp_read_barrier_depends() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_BARRIER_H */
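
The smp_wmb()/smp_rmb() pair defined above supports the usual publish/consume pattern across CPUs. A minimal sketch, assuming kernel context (cpu_relax() and the barrier macros from this header); the names data, ready, publish() and consume() are illustrative only.

static int data;
static int ready;

/* Writer: the data store must be visible before the flag store. */
void publish(void)
{
        data = 42;
        smp_wmb();              /* order: data before ready */
        ready = 1;
}

/* Reader: once the flag is observed, the data load must not be
 * reordered before it. */
int consume(void)
{
        while (!ready)
                cpu_relax();
        smp_rmb();              /* order: ready before data */
        return data;
}

Note that at this point in the port mb() is only a compiler barrier (see the TODO above), so the smp_*() variants compile down to barrier() either way; the pairing discipline is what matters once a real sync instruction is wired in.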
(diffs for the remaining changed files did not load)
