diff --git a/[refs] b/[refs]
index fd1de1551388..5aa60b8cca06 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: e89227889cec6b66758b44c83f78dba337e5d699
+refs/heads/master: 227510c7f175c44b12cdff6eab316e53dbf71f92
diff --git a/trunk/arch/sparc64/kernel/entry.S b/trunk/arch/sparc64/kernel/entry.S
index 88332f00094a..d781f10adc52 100644
--- a/trunk/arch/sparc64/kernel/entry.S
+++ b/trunk/arch/sparc64/kernel/entry.S
@@ -1600,11 +1600,11 @@ sys_clone:	flushw
 		ba,pt	%xcc, sparc_do_fork
 		 add	%sp, PTREGS_OFF, %o2
 ret_from_syscall:
-	/* Clear current_thread_info()->new_child, and
-	 * check performance counter stuff too.
+	/* Clear SPARC_FLAG_NEWCHILD, switch_to leaves thread.flags in
+	 * %o7 for us.  Check performance counter stuff too.
 	 */
-	stb	%g0, [%g6 + TI_NEW_CHILD]
-	ldx	[%g6 + TI_FLAGS], %l0
+	andn	%o7, _TIF_NEWCHILD, %l0
+	stx	%l0, [%g6 + TI_FLAGS]
 	call	schedule_tail
 	 mov	%g7, %o0
 	andcc	%l0, _TIF_PERFCTR, %g0
@@ -1720,11 +1720,12 @@ ret_sys_call:
 	/* Check if force_successful_syscall_return()
 	 * was invoked.
 	 */
-	ldub	[%curptr + TI_SYS_NOERROR], %l0
-	brz,pt	%l0, 1f
-	 nop
+	ldx	[%curptr + TI_FLAGS], %l0
+	andcc	%l0, _TIF_SYSCALL_SUCCESS, %g0
+	be,pt	%icc, 1f
+	 andn	%l0, _TIF_SYSCALL_SUCCESS, %l0
 	ba,pt	%xcc, 80f
-	stb	%g0, [%curptr + TI_SYS_NOERROR]
+	 stx	%l0, [%curptr + TI_FLAGS]
 1:
 	cmp	%o0, -ERESTART_RESTARTBLOCK
diff --git a/trunk/arch/sparc64/kernel/irq.c b/trunk/arch/sparc64/kernel/irq.c
index c9b69167632a..daa2fb93052c 100644
--- a/trunk/arch/sparc64/kernel/irq.c
+++ b/trunk/arch/sparc64/kernel/irq.c
@@ -782,14 +782,8 @@ static void distribute_irqs(void)
 }
 #endif
 
-struct sun5_timer {
-	u64	count0;
-	u64	limit0;
-	u64	count1;
-	u64	limit1;
-};
-
-static struct sun5_timer *prom_timers;
+struct sun5_timer *prom_timers;
 static u64 prom_limit0, prom_limit1;
 
 static void map_prom_timers(void)
@@ -845,6 +839,18 @@ static void kill_prom_timer(void)
 	: "g1", "g2");
 }
 
+void enable_prom_timer(void)
+{
+	if (!prom_timers)
+		return;
+
+	/* Set it to whatever was there before. */
+	prom_timers->limit1 = prom_limit1;
+	prom_timers->count1 = 0;
+	prom_timers->limit0 = prom_limit0;
+	prom_timers->count0 = 0;
+}
+
 void init_irqwork_curcpu(void)
 {
 	register struct irq_work_struct *workp asm("o2");
diff --git a/trunk/arch/sparc64/kernel/process.c b/trunk/arch/sparc64/kernel/process.c
index cffb1c8ab4fc..a0cd2b2494d6 100644
--- a/trunk/arch/sparc64/kernel/process.c
+++ b/trunk/arch/sparc64/kernel/process.c
@@ -621,8 +621,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 	memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ));
 	t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) |
+		_TIF_NEWCHILD |
 		(((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT);
-	t->new_child = 1;
 	t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
 	t->kregs = (struct pt_regs *)(child_trap_frame+sizeof(struct sparc_stackf));
 	t->fpsaved[0] = 0;
diff --git a/trunk/arch/sparc64/kernel/smp.c b/trunk/arch/sparc64/kernel/smp.c
index b9b42491e118..7e8e2919e186 100644
--- a/trunk/arch/sparc64/kernel/smp.c
+++ b/trunk/arch/sparc64/kernel/smp.c
@@ -137,7 +137,7 @@ void __init smp_callin(void)
 	/* Clear this or we will die instantly when we
 	 * schedule back to this idler...
 	 */
-	current_thread_info()->new_child = 0;
+	clear_thread_flag(TIF_NEWCHILD);
 
 	/* Attach to the address space of init_task. */
 	atomic_inc(&init_mm.mm_count);
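The sparc64 hunks up to this point all serve one change: the dedicated new_child and syscall_noerror bytes in struct thread_info become single bits (TIF_NEWCHILD, TIF_SYSCALL_SUCCESS) in the flags word, which is why entry.S can now test and clear them with one ldx/andcc/andn sequence instead of separate byte loads and stores. A standalone C sketch of the two layouts and the flag operations involved; the flag names and bit numbers come from the patch, while the struct names and main() are illustrative only:

/* sketch only -- not kernel code */
#include <stdio.h>

#define TIF_NEWCHILD        8            /* bit numbers as in thread_info.h */
#define TIF_SYSCALL_SUCCESS 11
#define _TIF_NEWCHILD        (1UL << TIF_NEWCHILD)
#define _TIF_SYSCALL_SUCCESS (1UL << TIF_SYSCALL_SUCCESS)

struct old_thread_info {                 /* before: one byte per condition */
    unsigned char new_child;
    unsigned char syscall_noerror;
};

struct new_thread_info {                 /* after: conditions packed into flags */
    unsigned long flags;
};

int main(void)
{
    struct new_thread_info t = { .flags = 0 };

    t.flags |= _TIF_NEWCHILD;            /* copy_thread() marks the child */
    t.flags &= ~_TIF_NEWCHILD;           /* what "andn %o7, _TIF_NEWCHILD" does */

    t.flags |= _TIF_SYSCALL_SUCCESS;     /* force_successful_syscall_return() */
    if (t.flags & _TIF_SYSCALL_SUCCESS)  /* what "andcc %l0, _TIF_..." tests */
        t.flags &= ~_TIF_SYSCALL_SUCCESS;

    printf("flags = %#lx\n", t.flags);
    return 0;
}

Packing conditions into one word is also what makes the TI_NEW_CHILD/TI_SYS_NOERROR offset checks in trap_init() (next hunk) unnecessary: there are no longer separate fields whose offsets could drift.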
diff --git a/trunk/arch/sparc64/kernel/traps.c b/trunk/arch/sparc64/kernel/traps.c
index 100b0107c4be..a9f4596d7c2b 100644
--- a/trunk/arch/sparc64/kernel/traps.c
+++ b/trunk/arch/sparc64/kernel/traps.c
@@ -2125,8 +2125,6 @@ void __init trap_init(void)
 	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
 	    TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) ||
 	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
-	    TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
-	    TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
 	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
 	    (TI_FPREGS & (64 - 1)))
 		thread_info_offsets_are_bolixed_dave();
diff --git a/trunk/drivers/sbus/char/Kconfig b/trunk/drivers/sbus/char/Kconfig
index a41778a490d6..90d8ef1f0bcc 100644
--- a/trunk/drivers/sbus/char/Kconfig
+++ b/trunk/drivers/sbus/char/Kconfig
@@ -71,6 +71,20 @@ config SUN_JSFLASH
 # XXX Why don't we do "source drivers/char/Config.in" somewhere?
 # no shit
 
+config APM_RTC_IS_GMT
+	bool
+	depends on EXPERIMENTAL && SPARC32 && PCI
+	default y
+	help
+	  Say Y here if your RTC (Real Time Clock a.k.a. hardware clock)
+	  stores the time in GMT (Greenwich Mean Time).  Say N if your RTC
+	  stores localtime.
+
+	  It is in fact recommended to store GMT in your RTC, because then you
+	  don't have to worry about daylight savings time changes.  The only
+	  reason not to use GMT in your RTC is if you also run a broken OS
+	  that doesn't understand GMT.
+
 config RTC
 	tristate "PC-style Real Time Clock Support"
 	depends on PCI && EXPERIMENTAL && SPARC32
diff --git a/trunk/drivers/sbus/char/aurora.c b/trunk/drivers/sbus/char/aurora.c
index d96cc47de566..650d5e924f47 100644
--- a/trunk/drivers/sbus/char/aurora.c
+++ b/trunk/drivers/sbus/char/aurora.c
@@ -1515,7 +1515,8 @@ static void aurora_close(struct tty_struct * tty, struct file * filp)
 	 */
 	timeout = jiffies+HZ;
 	while(port->SRER & SRER_TXEMPTY) {
-		msleep_interruptible(jiffies_to_msecs(port->timeout));
+		current->state = TASK_INTERRUPTIBLE;
+		schedule_timeout(port->timeout);
 		if (time_after(jiffies, timeout))
 			break;
 	}
@@ -1532,7 +1533,8 @@ static void aurora_close(struct tty_struct * tty, struct file * filp)
 	port->tty = 0;
 	if (port->blocked_open) {
 		if (port->close_delay) {
-			msleep_interruptible(jiffies_to_msecs(port->close_delay));
+			current->state = TASK_INTERRUPTIBLE;
+			schedule_timeout(port->close_delay);
 		}
 		wake_up_interruptible(&port->open_wait);
 	}
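Both aurora.c hunks replace msleep_interruptible() with the older set-state-plus-schedule_timeout() idiom. The unit change is the easy thing to miss: msleep_interruptible() takes milliseconds while schedule_timeout() takes jiffies, which is why the newer code needed the jiffies_to_msecs() conversion on port->timeout and the older code does not. A sketch of the two conversions, assuming HZ=100 purely for illustration:

/* sketch only -- HZ value assumed, not taken from any particular config */
#include <stdio.h>

#define HZ 100

static unsigned int jiffies_to_msecs_sketch(unsigned long j)
{
    return (unsigned int)(j * 1000 / HZ);          /* 1 jiffy = 1000/HZ ms */
}

static unsigned long msecs_to_jiffies_sketch(unsigned int ms)
{
    return ((unsigned long)ms * HZ + 999) / 1000;  /* round up to a full jiffy */
}

int main(void)
{
    unsigned long port_timeout = 30;               /* hypothetical value, in jiffies */

    printf("%lu jiffies = %u ms\n", port_timeout,
           jiffies_to_msecs_sketch(port_timeout)); /* 300 ms at HZ=100 */
    printf("250 ms = %lu jiffies\n",
           msecs_to_jiffies_sketch(250));          /* 25 jiffies at HZ=100 */
    return 0;
}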
diff --git a/trunk/drivers/sbus/char/bbc_envctrl.c b/trunk/drivers/sbus/char/bbc_envctrl.c
index b8a2c7353b0a..d5259f7fee6d 100644
--- a/trunk/drivers/sbus/char/bbc_envctrl.c
+++ b/trunk/drivers/sbus/char/bbc_envctrl.c
@@ -4,14 +4,13 @@
  * Copyright (C) 2001 David S. Miller (davem@redhat.com)
 */
 
-#define __KERNEL_SYSCALLS__
-
 #include
 #include
 #include
 #include
 #include
 #include
+#define __KERNEL_SYSCALLS__
 static int errno;
 #include
diff --git a/trunk/drivers/sbus/char/envctrl.c b/trunk/drivers/sbus/char/envctrl.c
index 9a8c572554f5..f6ed35b24f43 100644
--- a/trunk/drivers/sbus/char/envctrl.c
+++ b/trunk/drivers/sbus/char/envctrl.c
@@ -19,8 +19,6 @@
  *	Daniele Bellucci
 */
 
-#define __KERNEL_SYSCALLS__
-
 #include
 #include
 #include
@@ -37,6 +35,7 @@
 #include
 #include
 
+#define __KERNEL_SYSCALLS__
 static int errno;
 #include
 
@@ -1008,7 +1007,7 @@ static int kenvctrld(void *__unused)
 		return -ENODEV;
 	}
 
-	poll_interval = 5000; /* TODO env_mon_interval */
+	poll_interval = 5 * HZ; /* TODO env_mon_interval */
 
 	daemonize("kenvctrld");
 	allow_signal(SIGKILL);
@@ -1017,7 +1016,10 @@ static int kenvctrld(void *__unused)
 	printk(KERN_INFO "envctrl: %s starting...\n", current->comm);
 	for (;;) {
-		if(msleep_interruptible(poll_interval))
+		current->state = TASK_INTERRUPTIBLE;
+		schedule_timeout(poll_interval);
+
+		if(signal_pending(current))
 			break;
 
 		for (whichcpu = 0; whichcpu < ENVCTRL_MAX_CPU; ++whichcpu) {
diff --git a/trunk/drivers/sbus/char/vfc_i2c.c b/trunk/drivers/sbus/char/vfc_i2c.c
index 1faf1e75f71f..95e3cebf792c 100644
--- a/trunk/drivers/sbus/char/vfc_i2c.c
+++ b/trunk/drivers/sbus/char/vfc_i2c.c
@@ -88,16 +88,14 @@ void vfc_i2c_delay_wakeup(struct vfc_dev *dev)
 
 void vfc_i2c_delay_no_busy(struct vfc_dev *dev, unsigned long usecs)
 {
-	DEFINE_WAIT(wait);
-
 	init_timer(&dev->poll_timer);
-	dev->poll_timer.expires = jiffies + usecs_to_jiffies(usecs);
+	dev->poll_timer.expires = jiffies +
+		((unsigned long)usecs*(HZ))/1000000;
 	dev->poll_timer.data=(unsigned long)dev;
 	dev->poll_timer.function=(void *)(unsigned long)vfc_i2c_delay_wakeup;
 	add_timer(&dev->poll_timer);
-	prepare_to_wait(&dev->poll_wait, &wait, TASK_UNINTERRUPTIBLE);
-	schedule();
+	sleep_on(&dev->poll_wait);
 	del_timer(&dev->poll_timer);
-	finish_wait(&dev->poll_wait, &wait);
 }
 
 void inline vfc_i2c_delay(struct vfc_dev *dev)
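The vfc_i2c.c hunk open-codes the microseconds-to-jiffies conversion that usecs_to_jiffies() performs, and trades the race-free prepare_to_wait()/schedule()/finish_wait() sequence for sleep_on(), which can miss a wakeup that arrives between add_timer() and actually going to sleep. The arithmetic itself is plain truncating integer division; a sketch, with HZ=100 assumed for the numbers:

/* sketch only -- HZ assumed; in the kernel, use usecs_to_jiffies() */
#include <stdio.h>

#define HZ 100

static unsigned long usecs_to_jiffies_sketch(unsigned long usecs)
{
    /* Truncates: any delay shorter than one jiffy (10 ms at HZ=100)
     * becomes 0.  On 32-bit, usecs*HZ also overflows for delays
     * beyond roughly 2^32/HZ microseconds, so the real helper is
     * more careful than this.
     */
    return (usecs * HZ) / 1000000;
}

int main(void)
{
    printf("20000 us -> %lu jiffies\n", usecs_to_jiffies_sketch(20000)); /* 2 */
    printf("  500 us -> %lu jiffies\n", usecs_to_jiffies_sketch(500));   /* 0 */
    return 0;
}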
diff --git a/trunk/include/asm-arm/locks.h b/trunk/include/asm-arm/locks.h
index 9cb33fcc06c1..c26298f3891f 100644
--- a/trunk/include/asm-arm/locks.h
+++ b/trunk/include/asm-arm/locks.h
@@ -61,7 +61,7 @@
 "	strex	ip, lr, [%0]\n"		\
 "	teq	ip, #0\n"		\
 "	bne	1b\n"			\
-"	cmp	lr, #0\n"		\
+"	teq	lr, #0\n"		\
 "	movle	ip, %0\n"		\
 "	blle	" #wake			\
 	:				\
@@ -100,7 +100,7 @@
 	__asm__ __volatile__(		\
 	"@ up_op_read\n"		\
 "1:	ldrex	lr, [%0]\n"		\
-"	adds	lr, lr, %1\n"		\
+"	add	lr, lr, %1\n"		\
 "	strex	ip, lr, [%0]\n"		\
 "	teq	ip, #0\n"		\
 "	bne	1b\n"			\
diff --git a/trunk/include/asm-arm/spinlock.h b/trunk/include/asm-arm/spinlock.h
index 9705d5eec94c..182323619caa 100644
--- a/trunk/include/asm-arm/spinlock.h
+++ b/trunk/include/asm-arm/spinlock.h
@@ -79,8 +79,7 @@ typedef struct {
 } rwlock_t;
 
 #define RW_LOCK_UNLOCKED	(rwlock_t) { 0 }
-#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while (0)
-#define rwlock_is_locked(x)	(*((volatile unsigned int *)(x)) != 0)
+#define rwlock_init(x)		do { *(x) + RW_LOCK_UNLOCKED; } while (0)
 
 /*
  * Write locks are easy - we just set bit 31.  When unlocking, we can
@@ -101,21 +100,6 @@ static inline void _raw_write_lock(rwlock_t *rw)
 	: "cc", "memory");
 }
 
-static inline int _raw_write_trylock(rwlock_t *rw)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-"1:	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-"	strexeq	%0, %2, [%1]"
-	: "=&r" (tmp)
-	: "r" (&rw->lock), "r" (0x80000000)
-	: "cc", "memory");
-
-	return tmp == 0;
-}
-
 static inline void _raw_write_unlock(rwlock_t *rw)
 {
 	__asm__ __volatile__(
@@ -154,8 +138,6 @@ static inline void _raw_read_lock(rwlock_t *rw)
 
 static inline void _raw_read_unlock(rwlock_t *rw)
 {
-	unsigned long tmp, tmp2;
-
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%2]\n"
 "	sub	%0, %0, #1\n"
@@ -169,4 +151,19 @@ static inline void _raw_read_unlock(rwlock_t *rw)
 
 #define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
 
+static inline int _raw_write_trylock(rwlock_t *rw)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+"1:	ldrex	%0, [%1]\n"
+"	teq	%0, #0\n"
+"	strexeq	%0, %2, [%1]"
+	: "=&r" (tmp)
+	: "r" (&rw->lock), "r" (0x80000000)
+	: "cc", "memory");
+
+	return tmp == 0;
+}
+
 #endif /* __ASM_SPINLOCK_H */
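_raw_write_trylock(), which this diff moves from before _raw_write_unlock() to the end of the file, succeeds only when the entire lock word is zero — no readers in the low bits and no writer in bit 31 — and then claims the write lock by storing 0x80000000. The same semantics expressed with a portable compare-and-swap standing in for ARM's ldrex/strex pair; a sketch, not the kernel's code:

/* sketch only -- GCC/Clang __atomic builtin in place of ldrex/strex */
#include <stdio.h>

typedef struct { volatile unsigned int lock; } rwlock_t;

static int raw_write_trylock_sketch(rwlock_t *rw)
{
    unsigned int expected = 0;

    /* Succeed only if the lock word was 0 (no readers, no writer),
     * atomically replacing it with the writer bit.
     */
    return __atomic_compare_exchange_n(&rw->lock, &expected, 0x80000000U,
                                       0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

int main(void)
{
    rwlock_t rw = { 0 };

    printf("first trylock:  %d\n", raw_write_trylock_sketch(&rw)); /* 1: got it */
    printf("second trylock: %d\n", raw_write_trylock_sketch(&rw)); /* 0: held   */
    return 0;
}

The ldrex/strex original achieves the same thing without a separate compare instruction: strexeq only attempts the store when teq found the word zero, and reports in %0 whether the exclusive store actually landed.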
diff --git a/trunk/include/asm-sparc64/bitops.h b/trunk/include/asm-sparc64/bitops.h
index 9c5e71970287..9d722dc8cca3 100644
--- a/trunk/include/asm-sparc64/bitops.h
+++ b/trunk/include/asm-sparc64/bitops.h
@@ -20,52 +20,52 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
 
 /* "non-atomic" versions... */
 
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
 {
-	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
+	volatile unsigned long *m = addr + (nr >> 6);
 
 	*m |= (1UL << (nr & 63));
 }
 
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
 {
-	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
+	volatile unsigned long *m = addr + (nr >> 6);
 
 	*m &= ~(1UL << (nr & 63));
 }
 
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
 {
-	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
+	volatile unsigned long *m = addr + (nr >> 6);
 
 	*m ^= (1UL << (nr & 63));
 }
 
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
 {
-	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
-	unsigned long old = *m;
-	unsigned long mask = (1UL << (nr & 63));
+	volatile unsigned long *m = addr + (nr >> 6);
+	long old = *m;
+	long mask = (1UL << (nr & 63));
 
 	*m = (old | mask);
 	return ((old & mask) != 0);
 }
 
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
-	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
-	unsigned long old = *m;
-	unsigned long mask = (1UL << (nr & 63));
+	volatile unsigned long *m = addr + (nr >> 6);
+	long old = *m;
+	long mask = (1UL << (nr & 63));
 
 	*m = (old & ~mask);
 	return ((old & mask) != 0);
 }
 
-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
+static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr)
 {
-	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
-	unsigned long old = *m;
-	unsigned long mask = (1UL << (nr & 63));
+	volatile unsigned long *m = addr + (nr >> 6);
+	long old = *m;
+	long mask = (1UL << (nr & 63));
 
 	*m = (old ^ mask);
 	return ((old & mask) != 0);
@@ -79,13 +79,13 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
 #define smp_mb__after_clear_bit()	barrier()
 #endif
 
-static inline int test_bit(int nr, __const__ volatile unsigned long *addr)
+static __inline__ int test_bit(int nr, __const__ volatile unsigned long *addr)
 {
-	return (1UL & (addr[nr >> 6] >> (nr & 63))) != 0UL;
+	return (1UL & ((addr)[nr >> 6] >> (nr & 63))) != 0UL;
 }
 
 /* The easy/cheese version for now. */
-static inline unsigned long ffz(unsigned long word)
+static __inline__ unsigned long ffz(unsigned long word)
 {
 	unsigned long result;
 
@@ -103,7 +103,7 @@ static inline unsigned long ffz(unsigned long word)
  *
 * Undefined if no bit exists, so code should check against 0 first.
 */
-static inline unsigned long __ffs(unsigned long word)
+static __inline__ unsigned long __ffs(unsigned long word)
 {
 	unsigned long result = 0;
 
@@ -144,7 +144,7 @@ static inline int sched_find_first_bit(unsigned long *b)
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
-static inline int ffs(int x)
+static __inline__ int ffs(int x)
 {
 	if (!x)
 		return 0;
@@ -158,7 +158,7 @@ static inline int ffs(int x)
 
 #ifdef ULTRA_HAS_POPULATION_COUNT
 
-static inline unsigned int hweight64(unsigned long w)
+static __inline__ unsigned int hweight64(unsigned long w)
 {
 	unsigned int res;
 
@@ -166,7 +166,7 @@ static inline unsigned int hweight64(unsigned long w)
 	return res;
 }
 
-static inline unsigned int hweight32(unsigned int w)
+static __inline__ unsigned int hweight32(unsigned int w)
 {
 	unsigned int res;
 
@@ -174,7 +174,7 @@ static inline unsigned int hweight32(unsigned int w)
 	return res;
 }
 
-static inline unsigned int hweight16(unsigned int w)
+static __inline__ unsigned int hweight16(unsigned int w)
 {
 	unsigned int res;
 
@@ -182,7 +182,7 @@ static inline unsigned int hweight16(unsigned int w)
 	return res;
 }
 
-static inline unsigned int hweight8(unsigned int w)
+static __inline__ unsigned int hweight8(unsigned int w)
 {
 	unsigned int res;
 
@@ -236,7 +236,7 @@ extern unsigned long find_next_zero_bit(const unsigned long *,
 #define test_and_clear_le_bit(nr,addr) \
 	test_and_clear_bit((nr) ^ 0x38, (addr))
 
-static inline int test_le_bit(int nr, __const__ unsigned long * addr)
+static __inline__ int test_le_bit(int nr, __const__ unsigned long * addr)
 {
 	int			mask;
 	__const__ unsigned char	*ADDR = (__const__ unsigned char *) addr;
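All of these bitops share one addressing scheme: nr >> 6 selects the 64-bit word that holds bit nr, and 1UL << (nr & 63) masks the bit's position within that word. A standalone sketch of __set_bit()/test_bit() on a small bitmap, with illustrative names:

/* sketch only -- same index/mask arithmetic as the header above */
#include <stdio.h>

static void __set_bit_sketch(int nr, unsigned long *addr)
{
    unsigned long *m = addr + (nr >> 6);   /* word holding bit nr (64 bits/word) */

    *m |= (1UL << (nr & 63));              /* bit's offset inside that word */
}

static int test_bit_sketch(int nr, const unsigned long *addr)
{
    return (addr[nr >> 6] >> (nr & 63)) & 1UL;
}

int main(void)
{
    unsigned long map[4] = { 0 };          /* a 256-bit bitmap */

    __set_bit_sketch(130, map);            /* bit 130 = word 2, offset 2 */
    printf("bit 130: %d (word 2 = %#lx)\n",
           test_bit_sketch(130, map), map[2]);   /* 1, 0x4 */
    return 0;
}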
diff --git a/trunk/include/asm-sparc64/ptrace.h b/trunk/include/asm-sparc64/ptrace.h
index 6194f771e9fc..2d2b5a113d24 100644
--- a/trunk/include/asm-sparc64/ptrace.h
+++ b/trunk/include/asm-sparc64/ptrace.h
@@ -94,9 +94,8 @@ struct sparc_trapf {
 #define STACKFRAME32_SZ	sizeof(struct sparc_stackf32)
 
 #ifdef __KERNEL__
-#define force_successful_syscall_return()	\
-do {	current_thread_info()->syscall_noerror = 1; \
-} while (0)
+#define force_successful_syscall_return()	\
+	set_thread_flag(TIF_SYSCALL_SUCCESS)
 #define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV))
 #define instruction_pointer(regs) ((regs)->tpc)
 #ifdef CONFIG_SMP
diff --git a/trunk/include/asm-sparc64/rwsem.h b/trunk/include/asm-sparc64/rwsem.h
index 4568ee4022df..a1cc94f95984 100644
--- a/trunk/include/asm-sparc64/rwsem.h
+++ b/trunk/include/asm-sparc64/rwsem.h
@@ -46,14 +46,54 @@ extern void __up_read(struct rw_semaphore *sem);
 extern void __up_write(struct rw_semaphore *sem);
 extern void __downgrade_write(struct rw_semaphore *sem);
 
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static __inline__ int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
 {
-	return atomic_add_return(delta, (atomic_t *)(&sem->count));
+	int tmp = delta;
+
+	__asm__ __volatile__(
+	"1:\tlduw	[%2], %%g1\n\t"
+	"add		%%g1, %1, %%g7\n\t"
+	"cas		[%2], %%g1, %%g7\n\t"
+	"cmp		%%g1, %%g7\n\t"
+	"membar		#StoreLoad | #StoreStore\n\t"
+	"bne,pn		%%icc, 1b\n\t"
+	" nop\n\t"
+	"mov		%%g7, %0\n\t"
+	: "=&r" (tmp)
+	: "0" (tmp), "r" (sem)
+	: "g1", "g7", "memory", "cc");
+
+	return tmp + delta;
+}
+
+#define rwsem_atomic_add rwsem_atomic_update
+
+static __inline__ __u16 rwsem_cmpxchgw(struct rw_semaphore *sem, __u16 __old, __u16 __new)
+{
+	u32 old = (sem->count & 0xffff0000) | (u32) __old;
+	u32 new = (old & 0xffff0000) | (u32) __new;
+	u32 prev;
+
+again:
+	__asm__ __volatile__("cas	[%2], %3, %0\n\t"
+			     "membar	#StoreLoad | #StoreStore"
+			     : "=&r" (prev)
+			     : "0" (new), "r" (sem), "r" (old)
+			     : "memory");
+
+	/* To give the same semantics as x86 cmpxchgw, keep trying
+	 * if only the upper 16-bits changed.
+	 */
+	if (prev != old &&
+	    ((prev & 0xffff) == (old & 0xffff)))
+		goto again;
+
+	return prev & 0xffff;
 }
 
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+static __inline__ signed long rwsem_cmpxchg(struct rw_semaphore *sem, signed long old, signed long new)
 {
-	atomic_add(delta, (atomic_t *)(&sem->count));
+	return cmpxchg(&sem->count,old,new);
 }
 
 #endif /* __KERNEL__ */
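rwsem_atomic_update() above is a classic compare-and-swap retry loop: fetch the counter into %g1, compute %g1 + delta into %g7, cas it back, and start over if another cpu changed the counter in between; the routine returns the post-update value (tmp holds the fetched old value, hence the final tmp + delta). The same loop with a portable CAS builtin standing in for the sparc64 cas instruction — a sketch, not kernel code:

/* sketch only -- __sync builtin in place of the cas instruction */
#include <stdio.h>

static int rwsem_atomic_update_sketch(int delta, int *count)
{
    int old, new;

    do {
        old = *count;                  /* lduw  [%2], %%g1        */
        new = old + delta;             /* add   %%g1, %1, %%g7    */
        /* cas: install new only if *count still equals old; the
         * builtin returns what was there, so a mismatch means we
         * lost the race and must retry.
         */
    } while (__sync_val_compare_and_swap(count, old, new) != old);

    return new;                        /* the post-update value */
}

int main(void)
{
    int count = 0;

    printf("%d\n", rwsem_atomic_update_sketch(1, &count));   /* 1 */
    printf("%d\n", rwsem_atomic_update_sketch(-1, &count));  /* 0 */
    return 0;
}

rwsem_cmpxchgw() layers one more trick on top: since cas only operates on full 32-bit words, it folds the current upper halfword into both compare values and retries when only that upper half changed, so callers observe x86-style 16-bit cmpxchg semantics.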
diff --git a/trunk/include/asm-sparc64/spitfire.h b/trunk/include/asm-sparc64/spitfire.h
index 962638c9d122..1aa932773af8 100644
--- a/trunk/include/asm-sparc64/spitfire.h
+++ b/trunk/include/asm-sparc64/spitfire.h
@@ -56,6 +56,52 @@ extern void cheetah_enable_pcache(void);
 	 SPITFIRE_HIGHEST_LOCKED_TLBENT : \
 	 CHEETAH_HIGHEST_LOCKED_TLBENT)
 
+static __inline__ unsigned long spitfire_get_isfsr(void)
+{
+	unsigned long ret;
+
+	__asm__ __volatile__("ldxa	[%1] %2, %0"
+			     : "=r" (ret)
+			     : "r" (TLB_SFSR), "i" (ASI_IMMU));
+	return ret;
+}
+
+static __inline__ unsigned long spitfire_get_dsfsr(void)
+{
+	unsigned long ret;
+
+	__asm__ __volatile__("ldxa	[%1] %2, %0"
+			     : "=r" (ret)
+			     : "r" (TLB_SFSR), "i" (ASI_DMMU));
+	return ret;
+}
+
+static __inline__ unsigned long spitfire_get_sfar(void)
+{
+	unsigned long ret;
+
+	__asm__ __volatile__("ldxa	[%1] %2, %0"
+			     : "=r" (ret)
+			     : "r" (DMMU_SFAR), "i" (ASI_DMMU));
+	return ret;
+}
+
+static __inline__ void spitfire_put_isfsr(unsigned long sfsr)
+{
+	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
+			     "membar	#Sync"
+			     : /* no outputs */
+			     : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_IMMU));
+}
+
+static __inline__ void spitfire_put_dsfsr(unsigned long sfsr)
+{
+	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
+			     "membar	#Sync"
+			     : /* no outputs */
+			     : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_DMMU));
+}
+
 /* The data cache is write through, so this just invalidates the
  * specified line.
  */
@@ -147,6 +193,90 @@ static __inline__ void spitfire_put_itlb_data(int entry, unsigned long data)
 			       "i" (ASI_ITLB_DATA_ACCESS));
 }
 
+/* Spitfire hardware assisted TLB flushes. */
+
+/* Context level flushes. */
+static __inline__ void spitfire_flush_dtlb_primary_context(void)
+{
+	__asm__ __volatile__("stxa	%%g0, [%0] %1\n\t"
+			     "membar	#Sync"
+			     : /* No outputs */
+			     : "r" (0x40), "i" (ASI_DMMU_DEMAP));
+}
+
+static __inline__ void spitfire_flush_itlb_primary_context(void)
+{
+	__asm__ __volatile__("stxa	%%g0, [%0] %1\n\t"
+			     "membar	#Sync"
+			     : /* No outputs */
+			     : "r" (0x40), "i" (ASI_IMMU_DEMAP));
+}
+
+static __inline__ void spitfire_flush_dtlb_secondary_context(void)
+{
+	__asm__ __volatile__("stxa	%%g0, [%0] %1\n\t"
+			     "membar	#Sync"
+			     : /* No outputs */
+			     : "r" (0x50), "i" (ASI_DMMU_DEMAP));
+}
+
+static __inline__ void spitfire_flush_itlb_secondary_context(void)
+{
+	__asm__ __volatile__("stxa	%%g0, [%0] %1\n\t"
+			     "membar	#Sync"
+			     : /* No outputs */
+			     : "r" (0x50), "i" (ASI_IMMU_DEMAP));
+}
+
+static __inline__ void spitfire_flush_dtlb_nucleus_context(void)
+{
+	__asm__ __volatile__("stxa	%%g0, [%0] %1\n\t"
+			     "membar	#Sync"
+			     : /* No outputs */
+			     : "r" (0x60), "i" (ASI_DMMU_DEMAP));
+}
+
+static __inline__ void spitfire_flush_itlb_nucleus_context(void)
+{
+	__asm__ __volatile__("stxa	%%g0, [%0] %1\n\t"
+			     "membar	#Sync"
+			     : /* No outputs */
+			     : "r" (0x60), "i" (ASI_IMMU_DEMAP));
+}
+
+/* Page level flushes. */
+static __inline__ void spitfire_flush_dtlb_primary_page(unsigned long page)
+{
+	__asm__ __volatile__("stxa	%%g0, [%0] %1\n\t"
+			     "membar	#Sync"
+			     : /* No outputs */
+			     : "r" (page), "i" (ASI_DMMU_DEMAP));
+}
+
+static __inline__ void spitfire_flush_itlb_primary_page(unsigned long page)
+{
+	__asm__ __volatile__("stxa	%%g0, [%0] %1\n\t"
+			     "membar	#Sync"
+			     : /* No outputs */
+			     : "r" (page), "i" (ASI_IMMU_DEMAP));
+}
+
+static __inline__ void spitfire_flush_dtlb_secondary_page(unsigned long page)
+{
+	__asm__ __volatile__("stxa	%%g0, [%0] %1\n\t"
+			     "membar	#Sync"
+			     : /* No outputs */
+			     : "r" (page | 0x10), "i" (ASI_DMMU_DEMAP));
+}
+
+static __inline__ void spitfire_flush_itlb_secondary_page(unsigned long page)
+{
+	__asm__ __volatile__("stxa	%%g0, [%0] %1\n\t"
+			     "membar	#Sync"
+			     : /* No outputs */
+			     : "r" (page | 0x10), "i" (ASI_IMMU_DEMAP));
+}
+
 static __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page)
 {
 	__asm__ __volatile__("stxa	%%g0, [%0] %1\n\t"
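The flush helpers above differ only in the address written to the IMMU/DMMU demap ASI. Reading the constants off the code: 0x40/0x50/0x60 request a primary/secondary/nucleus context demap, and page-level demaps use the page address itself, OR'd with 0x10 for the secondary context. A sketch of that encoding — the split into a "type" bit and a "context" field is inferred from these constants, not taken from an UltraSPARC manual:

/* sketch only -- field interpretation is an inference from the code above */
#include <stdio.h>

enum demap_ctx  { CTX_PRIMARY = 0x00, CTX_SECONDARY = 0x10, CTX_NUCLEUS = 0x20 };
enum demap_type { DEMAP_PAGE = 0x00, DEMAP_CONTEXT = 0x40 };

static unsigned long demap_addr(enum demap_type t, enum demap_ctx c,
                                unsigned long page)
{
    return page | t | c;   /* page is page-aligned, so the low bits are free */
}

int main(void)
{
    printf("%#lx\n", demap_addr(DEMAP_CONTEXT, CTX_PRIMARY,   0));       /* 0x40 */
    printf("%#lx\n", demap_addr(DEMAP_CONTEXT, CTX_SECONDARY, 0));       /* 0x50 */
    printf("%#lx\n", demap_addr(DEMAP_CONTEXT, CTX_NUCLEUS,   0));       /* 0x60 */
    printf("%#lx\n", demap_addr(DEMAP_PAGE,    CTX_SECONDARY, 0x2000));  /* 0x2010 */
    return 0;
}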
diff --git a/trunk/include/asm-sparc64/system.h b/trunk/include/asm-sparc64/system.h
index ee4bdfc6b88f..f9be2c5b4dc9 100644
--- a/trunk/include/asm-sparc64/system.h
+++ b/trunk/include/asm-sparc64/system.h
@@ -190,23 +190,24 @@ do {	if (test_thread_flag(TIF_PERFCTR)) {	\
 	"wrpr	%%g1, %%cwp\n\t"			\
 	"ldx	[%%g6 + %3], %%o6\n\t"			\
 	"ldub	[%%g6 + %2], %%o5\n\t"			\
-	"ldub	[%%g6 + %4], %%o7\n\t"			\
+	"ldx	[%%g6 + %4], %%o7\n\t"			\
 	"mov	%%g6, %%l2\n\t"				\
 	"wrpr	%%o5, 0x0, %%wstate\n\t"		\
 	"ldx	[%%sp + 2047 + 0x70], %%i6\n\t"		\
 	"ldx	[%%sp + 2047 + 0x78], %%i7\n\t"		\
 	"wrpr	%%g0, 0x94, %%pstate\n\t"		\
 	"mov	%%l2, %%g6\n\t"				\
-	"ldx	[%%g6 + %6], %%g4\n\t"			\
+	"ldx	[%%g6 + %7], %%g4\n\t"			\
 	"wrpr	%%g0, 0x96, %%pstate\n\t"		\
-	"brz,pt	%%o7, 1f\n\t"				\
+	"andcc	%%o7, %6, %%g0\n\t"			\
+	"beq,pt	%%icc, 1f\n\t"				\
 	" mov	%%g7, %0\n\t"				\
 	"b,a ret_from_syscall\n\t"			\
 	"1:\n\t"					\
 	: "=&r" (last)					\
 	: "0" (next->thread_info),			\
-	  "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD),	\
-	  "i" (TI_CWP), "i" (TI_TASK)			\
+	  "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_FLAGS), "i" (TI_CWP),	\
+	  "i" (_TIF_NEWCHILD), "i" (TI_TASK)		\
 	: "cc",						\
 	  "g1", "g2", "g3", "g7",			\
 	  "l2", "l3", "l4", "l5", "l6", "l7",		\
diff --git a/trunk/include/asm-sparc64/thread_info.h b/trunk/include/asm-sparc64/thread_info.h
index 352d9943661a..a1d25c06f92a 100644
--- a/trunk/include/asm-sparc64/thread_info.h
+++ b/trunk/include/asm-sparc64/thread_info.h
@@ -47,9 +47,7 @@ struct thread_info {
 	struct pt_regs		*kregs;
 	struct exec_domain	*exec_domain;
 	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
-	__u8			new_child;
-	__u8			syscall_noerror;
-	__u16			__pad;
+	int			__pad;
 
 	unsigned long		*utraps;
 
@@ -89,8 +87,6 @@ struct thread_info {
 #define TI_KREGS	0x00000028
 #define TI_EXEC_DOMAIN	0x00000030
 #define TI_PRE_COUNT	0x00000038
-#define TI_NEW_CHILD	0x0000003c
-#define TI_SYS_NOERROR	0x0000003d
 #define TI_UTRAPS	0x00000040
 #define TI_REG_WINDOW	0x00000048
 #define TI_RWIN_SPTRS	0x000003c8
@@ -223,10 +219,10 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define TIF_UNALIGNED		5	/* allowed to do unaligned accesses */
 #define TIF_NEWSIGNALS		6	/* wants new-style signals */
 #define TIF_32BIT		7	/* 32-bit binary */
-/* flag bit 8 is available */
+#define TIF_NEWCHILD		8	/* just-spawned child process */
 #define TIF_SECCOMP		9	/* secure computing */
 #define TIF_SYSCALL_AUDIT	10	/* syscall auditing active */
-/* flag bit 11 is available */
+#define TIF_SYSCALL_SUCCESS	11
 /* NOTE: Thread flags >= 12 should be ones we have no interest
  *       in using in assembly, else we can't use the mask as
  *       an immediate value in instructions such as andcc.
@@ -243,8 +239,10 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define _TIF_UNALIGNED		(1<<TIF_UNALIGNED)
+#define _TIF_NEWCHILD		(1<<TIF_NEWCHILD)
+#define _TIF_SYSCALL_SUCCESS	(1<<TIF_SYSCALL_SUCCESS)
diff --git a/trunk/include/asm-sparc64/timer.h b/trunk/include/asm-sparc64/timer.h
--- a/trunk/include/asm-sparc64/timer.h
+++ b/trunk/include/asm-sparc64/timer.h
+/* How timers work:
+ *
+ * On uniprocessors we just use counter zero for the system wide
+ * ticker, this performs thread scheduling, clock book keeping,
+ * and runs timer based events.  Previously we used the Ultra
+ * %tick interrupt for this purpose.
+ *
+ * On multiprocessors we pick one cpu as the master level 10 tick
+ * processor.  Here this counter zero tick handles clock book
+ * keeping and timer events only.  Each Ultra has it's level
+ * 14 %tick interrupt set to fire off as well, even the master
+ * tick cpu runs this locally.  This ticker performs thread
+ * scheduling, system/user tick counting for the current thread,
+ * and also profiling if enabled.
+ */
+
 #include
 
+/* Two timers, traditionally steered to PIL's 10 and 14 respectively.
+ * But since INO packets are used on sun5, we could use any PIL level
+ * we like, however for now we use the normal ones.
+ *
+ * The 'reg' and 'interrupts' properties for these live in nodes named
+ * 'counter-timer'.  The first of three 'reg' properties describe where
+ * the sun5_timer registers are.  The other two I have no idea. (XXX)
+ */
+struct sun5_timer {
+	u64	count0;
+	u64	limit0;
+	u64	count1;
+	u64	limit1;
+};
+
+#define SUN5_LIMIT_ENABLE	0x80000000
+#define SUN5_LIMIT_TOZERO	0x40000000
+#define SUN5_LIMIT_ZRESTART	0x20000000
+#define SUN5_LIMIT_CMASK	0x1fffffff
+
+/* Given a HZ value, set the limit register so that the timer IRQ
+ * gets delivered that often.
+ */
+#define SUN5_HZ_TO_LIMIT(__hz)	(1000000/(__hz))
+
 struct sparc64_tick_ops {
 	void (*init_tick)(unsigned long);
 	unsigned long (*get_tick)(void);
diff --git a/trunk/net/ipv6/ip6_tunnel.c b/trunk/net/ipv6/ip6_tunnel.c
index f39ddeae1eef..09613729404c 100644
--- a/trunk/net/ipv6/ip6_tunnel.c
+++ b/trunk/net/ipv6/ip6_tunnel.c
@@ -1123,7 +1123,7 @@ static inline int ip6ip6_register(void)
 
 static inline int ip6ip6_unregister(void)
 {
-	return xfrm6_tunnel_unregister(&ip6ip6_handler);
+	return xfrm6_tunnel_deregister(&ip6ip6_handler);
 }
 #else
 static struct inet6_protocol xfrm6_tunnel_protocol = {
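Back in the timer.h hunk above, SUN5_HZ_TO_LIMIT encodes the assumption that the sun5 counter ticks at 1 MHz: loading the limit register with 1000000/HZ makes the counter wrap, and the timer IRQ fire, HZ times per second. The arithmetic, runnable as a standalone check:

/* sketch only -- macro copied from the hunk, HZ values chosen for illustration */
#include <stdio.h>

#define SUN5_HZ_TO_LIMIT(__hz)	(1000000/(__hz))

int main(void)
{
    /* A 1 MHz counter reaching the limit value HZ times per second. */
    printf("HZ=100  -> limit=%d ticks\n", SUN5_HZ_TO_LIMIT(100));   /* 10000 */
    printf("HZ=1000 -> limit=%d ticks\n", SUN5_HZ_TO_LIMIT(1000));  /* 1000  */
    return 0;
}

The computed value occupies the SUN5_LIMIT_CMASK portion of the limit register; the high bits (SUN5_LIMIT_ENABLE and friends) control whether and how the counter restarts after hitting the limit.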