
Commit

---
r: 335654
b: refs/heads/master
c: e801745
h: refs/heads/master
v: v3
Linus Torvalds committed Nov 15, 2012
1 parent cfd2962 commit 8475db9
Showing 26 changed files with 1,123 additions and 280 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: aa1e3e81e75ceb3d977c3292cefafcd5179eb8b8
refs/heads/master: e8017454ffb4e99f94b1a7e6dcb9ed519ee93441
1 change: 1 addition & 0 deletions trunk/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -30,6 +30,7 @@
* measurement, and debugging facilities.
*/

#include <linux/irqflags.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2c.h>
#include <asm/octeon/cvmx-spinlock.h>
128 changes: 39 additions & 89 deletions trunk/arch/mips/include/asm/bitops.h
@@ -14,7 +14,6 @@
#endif

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h> /* sigh ... */
@@ -44,6 +43,24 @@
#define smp_mb__before_clear_bit() smp_mb__before_llsc()
#define smp_mb__after_clear_bit() smp_llsc_mb()


/*
* These are the "slower" versions of the functions and are in bitops.c.
* These functions call raw_local_irq_{save,restore}().
*/
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
volatile unsigned long *addr);


/*
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
@@ -57,7 +74,7 @@
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long temp;

if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -92,17 +109,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
: "=&r" (temp), "+m" (*m)
: "ir" (1UL << bit));
} while (unlikely(!temp));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;

a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a |= mask;
raw_local_irq_restore(flags);
}
} else
__mips_set_bit(nr, addr);
}

/*
@@ -118,7 +126,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long temp;

if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -153,17 +161,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
: "=&r" (temp), "+m" (*m)
: "ir" (~(1UL << bit)));
} while (unlikely(!temp));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;

a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a &= ~mask;
raw_local_irq_restore(flags);
}
} else
__mips_clear_bit(nr, addr);
}

/*
@@ -191,7 +190,7 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *ad
*/
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;

if (kernel_uses_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -220,17 +219,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
: "=&r" (temp), "+m" (*m)
: "ir" (1UL << bit));
} while (unlikely(!temp));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;

a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a ^= mask;
raw_local_irq_restore(flags);
}
} else
__mips_change_bit(nr, addr);
}

/*
@@ -244,7 +234,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
static inline int test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long res;

smp_mb__before_llsc();
@@ -281,18 +271,8 @@ static inline int test_and_set_bit(unsigned long nr,
} while (unlikely(!res));

res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;

a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a |= mask;
raw_local_irq_restore(flags);
}
} else
res = __mips_test_and_set_bit(nr, addr);

smp_llsc_mb();

@@ -310,7 +290,7 @@ static inline int test_and_set_bit(unsigned long nr,
static inline int test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long res;

if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -345,18 +325,8 @@ static inline int test_and_set_bit_lock(unsigned long nr,
} while (unlikely(!res));

res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;

a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a |= mask;
raw_local_irq_restore(flags);
}
} else
res = __mips_test_and_set_bit_lock(nr, addr);

smp_llsc_mb();

@@ -373,7 +343,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
static inline int test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long res;

smp_mb__before_llsc();
@@ -428,18 +398,8 @@ static inline int test_and_clear_bit(unsigned long nr,
} while (unlikely(!res));

res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;

a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a &= ~mask;
raw_local_irq_restore(flags);
}
} else
res = __mips_test_and_clear_bit(nr, addr);

smp_llsc_mb();

@@ -457,7 +417,7 @@ static inline int test_and_clear_bit(unsigned long nr,
static inline int test_and_change_bit(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long res;

smp_mb__before_llsc();
@@ -494,18 +454,8 @@ static inline int test_and_change_bit(unsigned long nr,
} while (unlikely(!res));

res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;

a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a ^= mask;
raw_local_irq_restore(flags);
}
} else
res = __mips_test_and_change_bit(nr, addr);

smp_llsc_mb();

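The out-of-line helpers declared in the new bitops.h hunk live in a separate C file (the comment only says "bitops.c"; in mainline this is arch/mips/lib/bitops.c). As a reading aid, here is a minimal sketch of two of them, reconstructed from the inline IRQ-masking fallbacks that the hunks above delete; the file name, the includes and the EXPORT_SYMBOL() lines are assumptions, not text from this commit.

/*
 * Sketch of the out-of-line fallbacks referenced above.  The bodies are
 * the inline IRQ-masking code removed from bitops.h, moved into a plain
 * C file so the header no longer needs <linux/irqflags.h>.
 */
#include <linux/irqflags.h>
#include <linux/export.h>
#include <linux/bitops.h>		/* pulls in asm/bitops.h for SZLONG_LOG/SZLONG_MASK */

void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	unsigned long flags;

	a += nr >> SZLONG_LOG;
	mask = 1UL << (nr & SZLONG_MASK);
	raw_local_irq_save(flags);	/* serialize the read-modify-write against local interrupts */
	*a |= mask;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL(__mips_set_bit);

int __mips_test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	unsigned long flags;
	int res;

	a += nr >> SZLONG_LOG;
	mask = 1UL << (nr & SZLONG_MASK);
	raw_local_irq_save(flags);
	res = (*a & mask) != 0;		/* return the bit's previous value */
	*a |= mask;
	raw_local_irq_restore(flags);
	return res;
}
EXPORT_SYMBOL(__mips_test_and_set_bit);

The SMP memory barriers (smp_mb__before_llsc() / smp_llsc_mb()) stay in the inline wrappers in bitops.h, so the out-of-line bodies only need the IRQ masking shown here.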
2 changes: 1 addition & 1 deletion trunk/arch/mips/include/asm/compat.h
@@ -290,7 +290,7 @@ struct compat_shmid64_ds {

static inline int is_compat_task(void)
{
return test_thread_flag(TIF_32BIT);
return test_thread_flag(TIF_32BIT_ADDR);
}

#endif /* _ASM_COMPAT_H */
1 change: 1 addition & 0 deletions trunk/arch/mips/include/asm/io.h
@@ -15,6 +15,7 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/bug.h>
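The one-line include additions in cvmx-l2c.c and io.h appear to be fallout fixes for the same change: once asm/bitops.h stops including <linux/irqflags.h>, any file that relied on getting it transitively has to include it directly. A hypothetical illustration of the pattern (the function below is invented for the example):

/* Hypothetical example, not from the tree: a file that calls
 * local_irq_save() but previously only included <asm/bitops.h>. */
#include <linux/irqflags.h>	/* now required explicitly */
#include <asm/bitops.h>

static void example_toggle_bit_irqsafe(volatile unsigned long *word)
{
	unsigned long flags;

	local_irq_save(flags);		/* declared in <linux/irqflags.h> */
	__change_bit(0, word);		/* non-atomic bitop from <asm/bitops.h> */
	local_irq_restore(flags);
}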