Blackfin: SMP: make all barriers handle cache issues
When suspending/resuming, the common task freezing code runs in
parallel and freezes processes on each core, relying on the non-smp
version of the memory barriers for ordering (as well it should).
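To illustrate the failure mode (a hedged sketch, not code from this
commit; the function and variable names are hypothetical, modeled on
the freezer's publish-and-wait pattern):

static int freeze_data;
static int freeze_flag;

void core_a_publish(void)		/* runs on core A */
{
	freeze_data = 1;
	wmb();	/* mandatory barrier: on Blackfin SMP this must also
		 * mark the store so core B's L1 data cache syncs */
	freeze_flag = 1;
}

void core_b_wait(void)			/* runs on core B */
{
	while (!freeze_flag)
		cpu_relax();
	rmb();	/* must also invalidate stale L1 lines, otherwise
		 * freeze_data can still read as 0 here */
	BUG_ON(freeze_data != 1);
}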

The Blackfin smp barriers at the moment contain the cache sync
logic, but the non-smp barriers do not.  This is incorrect, as Rafael
summarized:
> ...
> The existing memory barriers are SMP barriers too, but they are more
> than _just_ SMP barriers.  At least that's how it is _supposed_ to be
> (eg. rmb() is supposed to be stronger than smp_rmb()).
> ...
> However, looking at the blackfin's definitions of SMP barriers I see
> that it uses extra stuff that should _also_ be used in the definitions
> of the mandatory barriers.
> ...
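
Restated as code (an illustrative sketch, not part of the quoted
mail): defining the smp_* barriers on top of the mandatory ones, as
this patch does, makes that strength relation hold by construction:

/*
 * Required strength: mb() >= smp_mb(), rmb() >= smp_rmb(),
 * wmb() >= smp_wmb().  Layering the definitions guarantees it:
 */
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()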

URL: http://lkml.org/lkml/2011/4/13/11
LKML-Reference: <BANLkTi=F-C-vwX4PGGfbkdTBw3OWL-twfg@mail.gmail.com>
Signed-off-by: Graf Yang <graf.yang@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Graf Yang authored and Mike Frysinger committed Apr 13, 2011
1 parent 85f2e68 commit 943aee0
Showing 1 changed file with 18 additions and 18 deletions.
arch/blackfin/include/asm/system.h
@@ -19,11 +19,11 @@
  * Force strict CPU ordering.
  */
 #define nop() __asm__ __volatile__ ("nop;\n\t" : : )
-#define mb() __asm__ __volatile__ ("" : : : "memory")
-#define rmb() __asm__ __volatile__ ("" : : : "memory")
-#define wmb() __asm__ __volatile__ ("" : : : "memory")
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#define read_barrier_depends() do { } while(0)
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define smp_read_barrier_depends() read_barrier_depends()
 
 #ifdef CONFIG_SMP
 asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value);
@@ -37,16 +37,16 @@ asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr,
 					unsigned long new, unsigned long old);
 
 #ifdef __ARCH_SYNC_CORE_DCACHE
-# define smp_mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
-# define smp_rmb() do { barrier(); smp_check_barrier(); } while (0)
-# define smp_wmb() do { barrier(); smp_mark_barrier(); } while (0)
-#define smp_read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
-
+/* Force Core data cache coherence */
+# define mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
+# define rmb() do { barrier(); smp_check_barrier(); } while (0)
+# define wmb() do { barrier(); smp_mark_barrier(); } while (0)
+# define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
 #else
-# define smp_mb() barrier()
-# define smp_rmb() barrier()
-# define smp_wmb() barrier()
-#define smp_read_barrier_depends() barrier()
+# define mb() barrier()
+# define rmb() barrier()
+# define wmb() barrier()
+# define read_barrier_depends() do { } while (0)
 #endif
 
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
@@ -99,10 +99,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 
 #else /* !CONFIG_SMP */
 
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while(0)
+#define mb() barrier()
+#define rmb() barrier()
+#define wmb() barrier()
+#define read_barrier_depends() do { } while (0)
 
 struct __xchg_dummy {
 	unsigned long a[100];
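
A usage note on the set_mb() change (a hedged sketch assuming the
post-patch definitions above; update_state and task_state are
hypothetical):

static volatile unsigned long task_state;

void update_state(unsigned long v)
{
	/* Expands to: task_state = v; mb();  With CONFIG_SMP and
	 * __ARCH_SYNC_CORE_DCACHE, that mb() also runs
	 * smp_check_barrier() and smp_mark_barrier(), so the other
	 * core's non-coherent L1 data cache observes the store. */
	set_mb(task_state, v);
}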
