[PATCH] powerpc: merge atomic.h, memory.h
powerpc: Merge atomic.h and memory.h into powerpc

Merged atomic.h into include/asm-powerpc.  Moved the asm-style HMT_ defines
from memory.h into ppc_asm.h, where there were already HMT_ defines; moved
the C-style HMT_ defines to processor.h.  Renamed memory.h to synch.h to
better reflect its contents.

Signed-off-by: Kumar Gala <kumar.gala@freescale.com>
Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Jon Loeliger <linuxppc@jdl.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Becky Bruce authored and Paul Mackerras committed Sep 25, 2005
1 parent 2bfadee commit feaf7cf
Showing 11 changed files with 88 additions and 298 deletions.
45 changes: 20 additions & 25 deletions include/asm-ppc/atomic.h → include/asm-powerpc/atomic.h
@@ -1,29 +1,20 @@
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
* PowerPC atomic operations
*/

#ifndef _ASM_PPC_ATOMIC_H_
#define _ASM_PPC_ATOMIC_H_

typedef struct { volatile int counter; } atomic_t;

#ifdef __KERNEL__
#include <asm/synch.h>

#define ATOMIC_INIT(i) { (i) }
#define ATOMIC_INIT(i) { (i) }

#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))

extern void atomic_clear_mask(unsigned long mask, unsigned long *addr);

#ifdef CONFIG_SMP
#define SMP_SYNC "sync"
#define SMP_ISYNC "\n\tisync"
#else
#define SMP_SYNC ""
#define SMP_ISYNC
#endif

/* Erratum #77 on the 405 means we need a sync or dcbt before every stwcx.
* The old ATOMIC_SYNC_FIX covered some but not all of this.
*/
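For context (and not part of this patch): PPC405_ERR77() is the existing workaround macro from ppc_asm.h. When the kernel is built for the affected 405 parts (CONFIG_IBM405_ERR77) it inserts a dcbt, one of the two fixes the comment mentions, in front of the conditional store; otherwise it expands to nothing. The loop in atomic_add_return() below therefore assembles to roughly:

    1:  lwarx   %0,0,%2         # load v->counter and take a reservation
        add     %0,%1,%0
        dcbt    0,%2            # 405 erratum #77 workaround, only on those parts
        stwcx.  %0,0,%2         # store only if the reservation still holds
        bne-    1b              # reservation lost: retry

with the dcbt line simply absent everywhere else.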
@@ -53,12 +44,13 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
int t;

__asm__ __volatile__(
EIEIO_ON_SMP
"1: lwarx %0,0,%2 # atomic_add_return\n\
add %0,%1,%0\n"
PPC405_ERR77(0,%2)
" stwcx. %0,0,%2 \n\
bne- 1b"
SMP_ISYNC
ISYNC_ON_SMP
: "=&r" (t)
: "r" (a), "r" (&v->counter)
: "cc", "memory");
@@ -88,12 +80,13 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
int t;

__asm__ __volatile__(
EIEIO_ON_SMP
"1: lwarx %0,0,%2 # atomic_sub_return\n\
subf %0,%1,%0\n"
PPC405_ERR77(0,%2)
" stwcx. %0,0,%2 \n\
bne- 1b"
SMP_ISYNC
ISYNC_ON_SMP
: "=&r" (t)
: "r" (a), "r" (&v->counter)
: "cc", "memory");
@@ -121,12 +114,13 @@ static __inline__ int atomic_inc_return(atomic_t *v)
int t;

__asm__ __volatile__(
EIEIO_ON_SMP
"1: lwarx %0,0,%1 # atomic_inc_return\n\
addic %0,%0,1\n"
PPC405_ERR77(0,%1)
" stwcx. %0,0,%1 \n\
bne- 1b"
SMP_ISYNC
ISYNC_ON_SMP
: "=&r" (t)
: "r" (&v->counter)
: "cc", "memory");
@@ -164,12 +158,13 @@ static __inline__ int atomic_dec_return(atomic_t *v)
int t;

__asm__ __volatile__(
EIEIO_ON_SMP
"1: lwarx %0,0,%1 # atomic_dec_return\n\
addic %0,%0,-1\n"
PPC405_ERR77(0,%1)
" stwcx. %0,0,%1\n\
bne- 1b"
SMP_ISYNC
ISYNC_ON_SMP
: "=&r" (t)
: "r" (&v->counter)
: "cc", "memory");
@@ -189,13 +184,14 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
int t;

__asm__ __volatile__(
EIEIO_ON_SMP
"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
addic. %0,%0,-1\n\
blt- 2f\n"
PPC405_ERR77(0,%1)
" stwcx. %0,0,%1\n\
bne- 1b"
SMP_ISYNC
ISYNC_ON_SMP
"\n\
2:" : "=&r" (t)
: "r" (&v->counter)
@@ -204,11 +200,10 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
return t;
}

#define __MB __asm__ __volatile__ (SMP_SYNC : : : "memory")
#define smp_mb__before_atomic_dec() __MB
#define smp_mb__after_atomic_dec() __MB
#define smp_mb__before_atomic_inc() __MB
#define smp_mb__after_atomic_inc() __MB
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()

#endif /* __KERNEL__ */
#endif /* _ASM_PPC_ATOMIC_H_ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */
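What the switch from SMP_ISYNC to the synch.h macros buys is easiest to see by expanding one routine by hand. A sketch of atomic_add_return() as the compiler sees it on an SMP build (assuming CONFIG_IBM405_ERR77 is off, so PPC405_ERR77() vanishes; whitespace tidied and comments added):

    static __inline__ int atomic_add_return(int a, atomic_t *v)
    {
            int t;

            __asm__ __volatile__(
            "eieio\n"              /* EIEIO_ON_SMP: earlier stores are ordered first */
    "1:     lwarx   %0,0,%2        # atomic_add_return\n\
            add     %0,%1,%0\n"
    "       stwcx.  %0,0,%2 \n\
            bne-    1b"
            "\n\tisync"            /* ISYNC_ON_SMP: later accesses wait for the update */
            : "=&r" (t)
            : "r" (a), "r" (&v->counter)
            : "cc", "memory");

            return t;
    }

On a non-SMP build both macros expand to nothing and the loop degenerates to the bare lwarx/stwcx. sequence. Note that the old 32-bit code had only SMP_ISYNC after the loop; the merged version also places an eieio in front of it, matching the ordering the 64-bit atomic.h already used.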
3 changes: 3 additions & 0 deletions include/asm-powerpc/ppc_asm.h
@@ -75,8 +75,11 @@
#define REST_32EVRS(n,s,base) REST_16EVRS(n,s,base); REST_16EVRS(n+16,s,base)

/* Macros to adjust thread priority for Iseries hardware multithreading */
#define HMT_VERY_LOW or 31,31,31 # very low priority\n"
#define HMT_LOW or 1,1,1
#define HMT_MEDIUM_LOW or 6,6,6 # medium low priority\n"
#define HMT_MEDIUM or 2,2,2
#define HMT_MEDIUM_HIGH or 5,5,5 # medium high priority\n"
#define HMT_HIGH or 3,3,3

/* handle instructions that older assemblers may not know */
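The bare HMT_* macros above are for .S files; per the commit message, the C-callable wrappers (HMT_low(), HMT_medium() and friends) now live in processor.h. A sketch of the usual idiom, with the function and the flag invented for the example:

    /* Illustrative only: lower thread priority while busy-waiting on SMT
     * hardware, then restore it.  wait_for_flag() is not a kernel API.
     */
    static inline void wait_for_flag(volatile int *flag)
    {
            while (!*flag) {
                    HMT_low();      /* or 1,1,1 - yield issue slots to the sibling thread */
                    barrier();
            }
            HMT_medium();           /* or 2,2,2 - back to normal priority */
    }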
51 changes: 51 additions & 0 deletions include/asm-powerpc/synch.h
@@ -0,0 +1,51 @@
#ifndef _ASM_POWERPC_SYNCH_H
#define _ASM_POWERPC_SYNCH_H

#include <linux/config.h>

#ifdef __powerpc64__
#define __SUBARCH_HAS_LWSYNC
#endif

#ifdef __SUBARCH_HAS_LWSYNC
# define LWSYNC lwsync
#else
# define LWSYNC sync
#endif


/*
* Arguably the bitops and *xchg operations don't imply any memory barrier
* or SMP ordering, but in fact a lot of drivers expect them to imply
* both, since they do on x86 cpus.
*/
#ifdef CONFIG_SMP
#define EIEIO_ON_SMP "eieio\n"
#define ISYNC_ON_SMP "\n\tisync"
#define SYNC_ON_SMP __stringify(LWSYNC) "\n"
#else
#define EIEIO_ON_SMP
#define ISYNC_ON_SMP
#define SYNC_ON_SMP
#endif

static inline void eieio(void)
{
__asm__ __volatile__ ("eieio" : : : "memory");
}

static inline void isync(void)
{
__asm__ __volatile__ ("isync" : : : "memory");
}

#ifdef CONFIG_SMP
#define eieio_on_smp() eieio()
#define isync_on_smp() isync()
#else
#define eieio_on_smp() __asm__ __volatile__("": : :"memory")
#define isync_on_smp() __asm__ __volatile__("": : :"memory")
#endif

#endif /* _ASM_POWERPC_SYNCH_H */
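The comment about bitops and *xchg having to behave as full barriers is what these string macros exist for: they get pasted directly into the lock-free loops, exactly as the atomic.h changes above show. For one more example, a sketch in the style of the kernel's xchg_u32() (operand constraints reproduced from memory, so treat the details as approximate):

    static __inline__ unsigned long xchg_u32(volatile void *p, unsigned long val)
    {
            unsigned long prev;

            __asm__ __volatile__(
            EIEIO_ON_SMP            /* release: earlier stores are ordered before the swap */
    "1:     lwarx   %0,0,%2 \n"
            PPC405_ERR77(0,%2)
    "       stwcx.  %3,0,%2 \n\
            bne-    1b"
            ISYNC_ON_SMP            /* acquire: later accesses wait for the swap */
            : "=&r" (prev), "=m" (*(volatile unsigned int *)p)
            : "r" (p), "r" (val), "m" (*(volatile unsigned int *)p)
            : "cc", "memory");

            return prev;
    }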

11 changes: 1 addition & 10 deletions include/asm-ppc/io.h
@@ -8,6 +8,7 @@

#include <asm/page.h>
#include <asm/byteorder.h>
#include <asm/synch.h>
#include <asm/mmu.h>

#define SIO_CONFIG_RA 0x398
@@ -440,16 +441,6 @@ extern inline void * phys_to_virt(unsigned long address)
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#define page_to_bus(page) (page_to_phys(page) + PCI_DRAM_OFFSET)

/*
* Enforce In-order Execution of I/O:
* Acts as a barrier to ensure all previous I/O accesses have
* completed before any further ones are issued.
*/
extern inline void eieio(void)
{
__asm__ __volatile__ ("eieio" : : : "memory");
}

/* Enforce in-order execution of data I/O.
* No distinction between read/write on PPC; use eieio for all three.
*/
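The eieio() helper removed here is not lost: it now comes from the new asm/synch.h pulled in at the top of the file. For readers who have not met it, eieio is exactly the barrier the deleted comment describes, keeping accesses to device memory in program order. A sketch of the classic use, with the device and its register layout entirely invented for the example:

    /* Illustrative only: the register block and start_dma() are made up;
     * only eieio() comes from the kernel headers.
     */
    struct fake_dma_regs {
            volatile u32 src;       /* source address */
            volatile u32 len;       /* transfer length */
            volatile u32 go;        /* doorbell */
    };

    static void start_dma(struct fake_dma_regs *r, u32 addr, u32 len)
    {
            r->src = addr;
            r->len = len;
            eieio();                /* the device must see src and len before the doorbell */
            r->go = 1;
    }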