---
r: 313726
b: refs/heads/master
c: af3bf7f
h: refs/heads/master
v: v3
Benjamin Herrenschmidt committed Jul 13, 2012
1 parent b0c815b commit bde41e1
Showing 47 changed files with 433 additions and 272 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: b915341b4be29b3b2c02da932b69871e9b55ca4b
refs/heads/master: af3bf7fbe0148aadc8fedd6f5e5279cabef9ced5
4 changes: 2 additions & 2 deletions trunk/arch/powerpc/include/asm/exception-64s.h
@@ -293,7 +293,7 @@ label##_hv: \

#define RUNLATCH_ON \
BEGIN_FTR_SECTION \
clrrdi r3,r1,THREAD_SHIFT; \
CURRENT_THREAD_INFO(r3, r1); \
ld r4,TI_LOCAL_FLAGS(r3); \
andi. r0,r4,_TLF_RUNLATCH; \
beql ppc64_runlatch_on_trampoline; \
@@ -332,7 +332,7 @@ label##_common: \
#ifdef CONFIG_PPC_970_NAP
#define FINISH_NAP \
BEGIN_FTR_SECTION \
clrrdi r11,r1,THREAD_SHIFT; \
CURRENT_THREAD_INFO(r11, r1); \
ld r9,TI_LOCAL_FLAGS(r11); \
andi. r10,r9,_TLF_NAPPING; \
bnel power4_fixup_nap; \
8 changes: 8 additions & 0 deletions trunk/arch/powerpc/include/asm/io.h
@@ -20,6 +20,14 @@ extern int check_legacy_ioport(unsigned long base_port);
#define _PNPWRP 0xa79
#define PNPBIOS_BASE 0xf000

#if defined(CONFIG_PPC64) && defined(CONFIG_PCI)
extern struct pci_dev *isa_bridge_pcidev;
/*
* has legacy ISA devices ?
*/
#define arch_has_dev_port() (isa_bridge_pcidev != NULL)
#endif

#include <linux/device.h>
#include <linux/io.h>

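arch_has_dev_port() gives generic code a way to ask whether legacy ISA port I/O is actually reachable; on 64-bit PowerPC with PCI it is true only when an ISA bridge was discovered. A hedged sketch of the usage pattern this enables, with a made-up helper name and assuming the generic fallback treats ports as always present:

/* Sketch only: consumer-side view of the arch_has_dev_port() hook. */
#ifndef arch_has_dev_port
#define arch_has_dev_port() (1)	/* assumed generic default */
#endif

static int legacy_ports_present(void)
{
	/* On PPC64 with PCI this expands to (isa_bridge_pcidev != NULL). */
	return arch_has_dev_port();
}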
1 change: 1 addition & 0 deletions trunk/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -74,6 +74,7 @@ struct kvmppc_host_state {
ulong vmhandler;
ulong scratch0;
ulong scratch1;
ulong sprg3;
u8 in_guest;
u8 restore_hid5;
u8 napping;
8 changes: 5 additions & 3 deletions trunk/arch/powerpc/include/asm/reg.h
@@ -491,6 +491,7 @@
#define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */
#define SPRN_SPRG2 0x112 /* Special Purpose Register General 2 */
#define SPRN_SPRG3 0x113 /* Special Purpose Register General 3 */
#define SPRN_USPRG3 0x103 /* SPRG3 userspace read */
#define SPRN_SPRG4 0x114 /* Special Purpose Register General 4 */
#define SPRN_SPRG5 0x115 /* Special Purpose Register General 5 */
#define SPRN_SPRG6 0x116 /* Special Purpose Register General 6 */
@@ -753,14 +754,14 @@
* 64-bit server:
* - SPRG0 unused (reserved for HV on Power4)
* - SPRG2 scratch for exception vectors
* - SPRG3 unused (user visible)
* - SPRG3 CPU and NUMA node for VDSO getcpu (user visible)
* - HSPRG0 stores PACA in HV mode
* - HSPRG1 scratch for "HV" exceptions
*
* 64-bit embedded
* - SPRG0 generic exception scratch
* - SPRG2 TLB exception stack
* - SPRG3 unused (user visible)
* - SPRG3 CPU and NUMA node for VDSO getcpu (user visible)
* - SPRG4 unused (user visible)
* - SPRG6 TLB miss scratch (user visible, sorry !)
* - SPRG7 critical exception scratch
@@ -1024,7 +1025,8 @@
/* Macros for setting and retrieving special purpose registers */
#ifndef __ASSEMBLY__
#define mfmsr() ({unsigned long rval; \
asm volatile("mfmsr %0" : "=r" (rval)); rval;})
asm volatile("mfmsr %0" : "=r" (rval) : \
: "memory"); rval;})
#ifdef CONFIG_PPC_BOOK3S_64
#define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
: : "r" (v) : "memory")
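The updated SPRG usage comments reflect that SPRG3, readable from userspace through its USPRG3 alias (0x103), now carries the CPU and NUMA node so a vDSO getcpu() can obtain both without a system call. A rough sketch of the packing this implies, assuming the CPU in the low halfword and the node in the next halfword (the kernel's exact field widths may differ):

/* Illustrative pack/unpack for an SPRG3-style getcpu value (not kernel code). */
static inline unsigned long sprg3_pack(unsigned int cpu, unsigned int node)
{
	return (cpu & 0xffff) | ((unsigned long)(node & 0xffff) << 16);
}

static inline void sprg3_unpack(unsigned long v, unsigned int *cpu, unsigned int *node)
{
	*cpu  = v & 0xffff;
	*node = (v >> 16) & 0xffff;
}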
6 changes: 6 additions & 0 deletions trunk/arch/powerpc/include/asm/thread_info.h
@@ -22,6 +22,12 @@

#define THREAD_SIZE (1 << THREAD_SHIFT)

#ifdef CONFIG_PPC64
#define CURRENT_THREAD_INFO(dest, sp) clrrdi dest, sp, THREAD_SHIFT
#else
#define CURRENT_THREAD_INFO(dest, sp) rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT
#endif

#ifndef __ASSEMBLY__
#include <linux/cache.h>
#include <asm/processor.h>
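CURRENT_THREAD_INFO(dest, sp) is the macro that replaces the open-coded clrrdi/rlwinm sequences throughout this series: the kernel stack is THREAD_SIZE-aligned and struct thread_info lives at its base, so clearing the low THREAD_SHIFT bits of the stack pointer yields the thread_info address. A minimal C sketch of the same computation (illustrative only, not kernel code):

/* Sketch: what CURRENT_THREAD_INFO computes, assuming thread_info sits at the
 * base of a THREAD_SIZE-aligned kernel stack. */
static inline struct thread_info *current_thread_info_of(unsigned long sp)
{
	/* THREAD_SIZE == 1 << THREAD_SHIFT, so this clears the low bits. */
	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}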
2 changes: 2 additions & 0 deletions trunk/arch/powerpc/include/asm/vdso.h
@@ -22,6 +22,8 @@ extern unsigned long vdso64_rt_sigtramp
extern unsigned long vdso32_sigtramp;
extern unsigned long vdso32_rt_sigtramp;

int __cpuinit vdso_getcpu_init(void);

#else /* __ASSEMBLY__ */

#ifdef __VDSO64__
1 change: 1 addition & 0 deletions trunk/arch/powerpc/kernel/asm-offsets.c
@@ -533,6 +533,7 @@ int main(void)
HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
HSTATE_FIELD(HSTATE_SPRG3, sprg3);
HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
HSTATE_FIELD(HSTATE_NAPPING, napping);
30 changes: 12 additions & 18 deletions trunk/arch/powerpc/kernel/entry_32.S
@@ -92,7 +92,7 @@ crit_transfer_to_handler:
mfspr r8,SPRN_SPRG_THREAD
lwz r0,KSP_LIMIT(r8)
stw r0,SAVED_KSP_LIMIT(r11)
rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
CURRENT_THREAD_INFO(r0, r1)
stw r0,KSP_LIMIT(r8)
/* fall through */
#endif
@@ -112,7 +112,7 @@ crit_transfer_to_handler:
mfspr r8,SPRN_SPRG_THREAD
lwz r0,KSP_LIMIT(r8)
stw r0,saved_ksp_limit@l(0)
rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
CURRENT_THREAD_INFO(r0, r1)
stw r0,KSP_LIMIT(r8)
/* fall through */
#endif
@@ -158,7 +158,7 @@ transfer_to_handler:
tophys(r11,r11)
addi r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_CPU(r9)
slwi r9,r9,3
add r11,r11,r9
@@ -179,7 +179,7 @@ transfer_to_handler:
ble- stack_ovf /* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
rlwinm r9,r1,0,0,31-THREAD_SHIFT
CURRENT_THREAD_INFO(r9, r1)
tophys(r9,r9) /* check local flags */
lwz r12,TI_LOCAL_FLAGS(r9)
mtcrf 0x01,r12
@@ -226,13 +226,7 @@ reenable_mmu: /* re-enable mmu so we can */
stw r3,16(r1)
stw r4,20(r1)
stw r5,24(r1)
andi. r12,r12,MSR_PR
b 11f
bl trace_hardirqs_off
b 12f
11:
bl trace_hardirqs_off
12:
lwz r5,24(r1)
lwz r4,20(r1)
lwz r3,16(r1)
@@ -333,7 +327,7 @@ _GLOBAL(DoSyscall)
mtmsr r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
CURRENT_THREAD_INFO(r10, r1)
lwz r11,TI_FLAGS(r10)
andi. r11,r11,_TIF_SYSCALL_T_OR_A
bne- syscall_dotrace
@@ -354,7 +348,7 @@ ret_from_syscall:
bl do_show_syscall_exit
#endif
mr r6,r3
rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
CURRENT_THREAD_INFO(r12, r1)
/* disable interrupts so current_thread_info()->flags can't change */
LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
/* Note: We don't bother telling lockdep about it */
@@ -815,7 +809,7 @@ ret_from_except:

user_exc_return: /* r10 contains MSR_KERNEL here */
/* Check current_thread_info()->flags */
rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_FLAGS(r9)
andi. r0,r9,_TIF_USER_WORK_MASK
bne do_work
@@ -835,7 +829,7 @@ restore_user:
/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
/* check current_thread_info->preempt_count */
rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
CURRENT_THREAD_INFO(r9, r1)
lwz r0,TI_PREEMPT(r9)
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore
@@ -852,7 +846,7 @@ resume_kernel:
bl trace_hardirqs_off
#endif
1: bl preempt_schedule_irq
rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
CURRENT_THREAD_INFO(r9, r1)
lwz r3,TI_FLAGS(r9)
andi. r0,r3,_TIF_NEED_RESCHED
bne- 1b
@@ -1122,7 +1116,7 @@ ret_from_debug_exc:
lwz r10,SAVED_KSP_LIMIT(r1)
stw r10,KSP_LIMIT(r9)
lwz r9,THREAD_INFO-THREAD(r9)
rlwinm r10,r1,0,0,(31-THREAD_SHIFT)
CURRENT_THREAD_INFO(r10, r1)
lwz r10,TI_PREEMPT(r10)
stw r10,TI_PREEMPT(r9)
RESTORE_xSRR(SRR0,SRR1);
@@ -1156,7 +1150,7 @@ load_dbcr0:
lis r11,global_dbcr0@ha
addi r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_CPU(r9)
slwi r9,r9,3
add r11,r11,r9
@@ -1197,7 +1191,7 @@ recheck:
LOAD_MSR_KERNEL(r10,MSR_KERNEL)
SYNC
MTMSRD(r10) /* disable interrupts */
rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_FLAGS(r9)
andi. r0,r9,_TIF_NEED_RESCHED
bne- do_resched
12 changes: 6 additions & 6 deletions trunk/arch/powerpc/kernel/entry_64.S
@@ -146,7 +146,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
REST_2GPRS(7,r1)
addi r9,r1,STACK_FRAME_OVERHEAD
#endif
clrrdi r11,r1,THREAD_SHIFT
CURRENT_THREAD_INFO(r11, r1)
ld r10,TI_FLAGS(r11)
andi. r11,r10,_TIF_SYSCALL_T_OR_A
bne- syscall_dotrace
@@ -181,7 +181,7 @@ syscall_exit:
bl .do_show_syscall_exit
ld r3,RESULT(r1)
#endif
clrrdi r12,r1,THREAD_SHIFT
CURRENT_THREAD_INFO(r12, r1)

ld r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
@@ -260,7 +260,7 @@ syscall_dotrace:
ld r7,GPR7(r1)
ld r8,GPR8(r1)
addi r9,r1,STACK_FRAME_OVERHEAD
clrrdi r10,r1,THREAD_SHIFT
CURRENT_THREAD_INFO(r10, r1)
ld r10,TI_FLAGS(r10)
b .Lsyscall_dotrace_cont

@@ -500,7 +500,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
2:
#endif /* !CONFIG_PPC_BOOK3S */

clrrdi r7,r8,THREAD_SHIFT /* base of new stack */
CURRENT_THREAD_INFO(r7, r8) /* base of new stack */
/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
because we don't need to leave the 288-byte ABI gap at the
top of the kernel stack. */
@@ -559,7 +559,7 @@ _GLOBAL(ret_from_except_lite)
mtmsrd r10,1 /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */
CURRENT_THREAD_INFO(r9, r1)
ld r3,_MSR(r1)
ld r4,TI_FLAGS(r9)
andi. r3,r3,MSR_PR
@@ -602,7 +602,7 @@ resume_kernel:
1: bl .preempt_schedule_irq

/* Re-test flags and eventually loop */
clrrdi r9,r1,THREAD_SHIFT
CURRENT_THREAD_INFO(r9, r1)
ld r4,TI_FLAGS(r9)
andi. r0,r4,_TIF_NEED_RESCHED
bne 1b
2 changes: 1 addition & 1 deletion trunk/arch/powerpc/kernel/exceptions-64e.S
@@ -222,7 +222,7 @@ exc_##n##_bad_stack: \
* interrupts happen before the wait instruction.
*/
#define CHECK_NAPPING() \
clrrdi r11,r1,THREAD_SHIFT; \
CURRENT_THREAD_INFO(r11, r1); \
ld r10,TI_LOCAL_FLAGS(r11); \
andi. r9,r10,_TLF_NAPPING; \
beq+ 1f; \
3 changes: 2 additions & 1 deletion trunk/arch/powerpc/kernel/exceptions-64s.S
@@ -239,6 +239,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
* out of line to handle them
*/
. = 0xe00
hv_exception_trampoline:
b h_data_storage_hv
. = 0xe20
b h_instr_storage_hv
@@ -851,7 +852,7 @@ BEGIN_FTR_SECTION
bne- do_ste_alloc /* If so handle it */
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)

clrrdi r11,r1,THREAD_SHIFT
CURRENT_THREAD_INFO(r11, r1)
lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
bne 77f /* then don't call hash_page now */
12 changes: 6 additions & 6 deletions trunk/arch/powerpc/kernel/ftrace.c
@@ -240,9 +240,9 @@ __ftrace_make_nop(struct module *mod,

/*
* On PPC32 the trampoline looks like:
* 0x3d, 0x60, 0x00, 0x00 lis r11,sym@ha
* 0x39, 0x6b, 0x00, 0x00 addi r11,r11,sym@l
* 0x7d, 0x69, 0x03, 0xa6 mtctr r11
* 0x3d, 0x80, 0x00, 0x00 lis r12,sym@ha
* 0x39, 0x8c, 0x00, 0x00 addi r12,r12,sym@l
* 0x7d, 0x89, 0x03, 0xa6 mtctr r12
* 0x4e, 0x80, 0x04, 0x20 bctr
*/

@@ -257,9 +257,9 @@ __ftrace_make_nop(struct module *mod,
pr_devel(" %08x %08x ", jmp[0], jmp[1]);

/* verify that this is what we expect it to be */
if (((jmp[0] & 0xffff0000) != 0x3d600000) ||
((jmp[1] & 0xffff0000) != 0x396b0000) ||
(jmp[2] != 0x7d6903a6) ||
if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
((jmp[1] & 0xffff0000) != 0x398c0000) ||
(jmp[2] != 0x7d8903a6) ||
(jmp[3] != 0x4e800420)) {
printk(KERN_ERR "Not a trampoline\n");
return -EINVAL;
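The new constants follow directly from the PowerPC instruction encoding: the major opcode sits in the top 6 bits and the destination register in the next 5, so moving the module trampoline from r11 to r12 turns the lis word from 0x3d60xxxx into 0x3d80xxxx (and likewise for the addi and mtctr words). A small standalone decoder illustrating this, assuming the standard D-form layout:

#include <stdint.h>
#include <stdio.h>

/* Decode the fields the trampoline check masks out (illustration only). */
static void decode(uint32_t insn)
{
	unsigned int opcode = insn >> 26;	/* major opcode: 15 = addis (lis) */
	unsigned int rd = (insn >> 21) & 0x1f;	/* destination GPR */

	printf("insn 0x%08x: opcode %u, rD = r%u\n", insn, opcode, rd);
}

int main(void)
{
	decode(0x3d600000);	/* old check: lis r11,... */
	decode(0x3d800000);	/* new check: lis r12,... */
	return 0;
}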
2 changes: 1 addition & 1 deletion trunk/arch/powerpc/kernel/head_fsl_booke.S
@@ -192,7 +192,7 @@ _ENTRY(__early_start)
li r0,0
stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

rlwinm r22,r1,0,0,31-THREAD_SHIFT /* current thread_info */
CURRENT_THREAD_INFO(r22, r1)
stw r24, TI_CPU(r22)

bl early_init
2 changes: 1 addition & 1 deletion trunk/arch/powerpc/kernel/hw_breakpoint.c
@@ -111,7 +111,7 @@ void arch_unregister_hw_breakpoint(struct perf_event *bp)
* and the single_step_dabr_instruction(), then cleanup the breakpoint
* restoration variables to prevent dangling pointers.
*/
if (bp->ctx->task)
if (bp->ctx && bp->ctx->task)
bp->ctx->task->thread.last_hit_ubp = NULL;
}

4 changes: 2 additions & 2 deletions trunk/arch/powerpc/kernel/idle_6xx.S
@@ -135,7 +135,7 @@ BEGIN_FTR_SECTION
DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
rlwinm r9,r1,0,0,31-THREAD_SHIFT /* current thread_info */
CURRENT_THREAD_INFO(r9, r1)
lwz r8,TI_LOCAL_FLAGS(r9) /* set napping bit */
ori r8,r8,_TLF_NAPPING /* so when we take an exception */
stw r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */
@@ -158,7 +158,7 @@ _GLOBAL(power_save_ppc32_restore)
stw r9,_NIP(r11) /* make it do a blr */

#ifdef CONFIG_SMP
rlwinm r12,r11,0,0,31-THREAD_SHIFT
CURRENT_THREAD_INFO(r12, r11)
lwz r11,TI_CPU(r12) /* get cpu number * 4 */
slwi r11,r11,2
#else
2 changes: 1 addition & 1 deletion trunk/arch/powerpc/kernel/idle_book3e.S
@@ -60,7 +60,7 @@ _GLOBAL(book3e_idle)
1: /* Let's set the _TLF_NAPPING flag so interrupts make us return
* to the right spot
*/
clrrdi r11,r1,THREAD_SHIFT
CURRENT_THREAD_INFO(r11, r1)
ld r10,TI_LOCAL_FLAGS(r11)
ori r10,r10,_TLF_NAPPING
std r10,TI_LOCAL_FLAGS(r11)