powerpc: Fix usage of register macros getting ready for %r0 change
Anything that uses a constructed instruction (i.e. from ppc-opcode.h)
needs to use the new R0..R31 macros, as %r0 is not going to work.

Also convert usages of macros where we are just calculating an offset
(usually for a load/store), like:
	std	r14,STK_REG(r14)(r1)
We can't use STK_REG(r14) here, as %r14 doesn't work inside the STK_REG
macro since it is only calculating an offset.
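To make the distinction concrete, here is a minimal sketch of the two
patterns; the macro bodies are simplified illustrations, not the exact
definitions from asm/ppc-opcode.h and asm/ppc_asm.h:

	/* Constructed instruction: the register number lands inside an
	 * arithmetic expression that builds the encoding, so it must be
	 * a plain integer -- "%r4" is not valid in that arithmetic.
	 * (Field macros sketched after the ppc-opcode.h style;
	 * PPC_INST_ERATWE is the opcode constant from ppc-opcode.h.)
	 */
	#define __PPC_RS(s)	(((s) & 0x1f) << 21)
	#define __PPC_RA(a)	(((a) & 0x1f) << 16)
	#define __PPC_WS(w)	(((w) & 0x1f) << 11)
	#define PPC_ERATWE(s, a, w) \
		.long PPC_INST_ERATWE | __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w)

	#define R4	4		/* new plain-number register define */

	PPC_ERATWE(R4,R4,3)		/* fine: (4 & 0x1f) << 21 */
	PPC_ERATWE(r4,r4,3)		/* breaks once r4 expands to %r4 */

	/* Offset-only macro: pure arithmetic on the argument, same rule
	 * (the frame-layout numbers here are illustrative).
	 */
	#define STK_REG(i)	(112 + ((i) - 14) * 8)

	std	r14,STK_REG(R14)(r1)	/* register operands themselves keep r14 */

In both cases only the macro argument changes; ordinary register operands
in the instructions (r1, r14, ...) are untouched.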

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Michael Neuling authored and Benjamin Herrenschmidt committed Jul 10, 2012
1 parent 564aa5c commit c75df6f
Showing 25 changed files with 659 additions and 659 deletions.
6 changes: 3 additions & 3 deletions arch/powerpc/kernel/cpu_setup_a2.S
@@ -100,19 +100,19 @@ _icswx_skip_guest:
lis r4,(MMUCR0_TLBSEL_I|MMUCR0_ECL)@h
mtspr SPRN_MMUCR0, r4
li r4,A2_IERAT_SIZE-1
PPC_ERATWE(r4,r4,3)
PPC_ERATWE(R4,R4,3)

/* Now set the D-ERAT watermark to 31 */
lis r4,(MMUCR0_TLBSEL_D|MMUCR0_ECL)@h
mtspr SPRN_MMUCR0, r4
li r4,A2_DERAT_SIZE-1
PPC_ERATWE(r4,r4,3)
PPC_ERATWE(R4,R4,3)

/* And invalidate the beast just in case. That won't get rid of
* a bolted entry though it will be in LRU and so will go away eventually
* but let's not bother for now
*/
PPC_ERATILX(0,0,0)
PPC_ERATILX(0,R0,R0)
1:
blr

4 changes: 2 additions & 2 deletions arch/powerpc/kernel/fpu.S
@@ -106,7 +106,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
lfd fr0,THREAD_FPSCR(r5)
MTFSF_L(fr0)
REST_32FPVSRS(0, r4, r5)
REST_32FPVSRS(0, R4, R5)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
fromreal(r4)
@@ -140,7 +140,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
addi r3,r3,THREAD /* want THREAD of task */
PPC_LL r5,PT_REGS(r3)
PPC_LCMPI 0,r5,0
SAVE_32FPVSRS(0, r4 ,r3)
SAVE_32FPVSRS(0, R4 ,R3)
mffs fr0
stfd fr0,THREAD_FPSCR(r3)
beq 1f
2 changes: 1 addition & 1 deletion arch/powerpc/kernel/kvm.c
@@ -302,7 +302,7 @@ static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)

if (imm_one) {
p[kvm_emulate_wrtee_reg_offs] =
KVM_INST_LI | __PPC_RT(30) | MSR_EE;
KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
} else {
/* Make clobbered registers work too */
switch (get_rt(rt)) {
4 changes: 2 additions & 2 deletions arch/powerpc/kernel/misc_64.S
@@ -314,7 +314,7 @@ _GLOBAL(real_205_readb)
mtmsrd r0
sync
isync
LBZCIX(r3,0,r3)
LBZCIX(R3,0,R3)
isync
mtmsrd r7
sync
@@ -329,7 +329,7 @@ _GLOBAL(real_205_writeb)
mtmsrd r0
sync
isync
STBCIX(r3,0,r4)
STBCIX(R3,0,R4)
isync
mtmsrd r7
sync
220 changes: 110 additions & 110 deletions arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -206,24 +206,24 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
/* Load up FP, VMX and VSX registers */
bl kvmppc_load_fp

ld r14, VCPU_GPR(r14)(r4)
ld r15, VCPU_GPR(r15)(r4)
ld r16, VCPU_GPR(r16)(r4)
ld r17, VCPU_GPR(r17)(r4)
ld r18, VCPU_GPR(r18)(r4)
ld r19, VCPU_GPR(r19)(r4)
ld r20, VCPU_GPR(r20)(r4)
ld r21, VCPU_GPR(r21)(r4)
ld r22, VCPU_GPR(r22)(r4)
ld r23, VCPU_GPR(r23)(r4)
ld r24, VCPU_GPR(r24)(r4)
ld r25, VCPU_GPR(r25)(r4)
ld r26, VCPU_GPR(r26)(r4)
ld r27, VCPU_GPR(r27)(r4)
ld r28, VCPU_GPR(r28)(r4)
ld r29, VCPU_GPR(r29)(r4)
ld r30, VCPU_GPR(r30)(r4)
ld r31, VCPU_GPR(r31)(r4)
ld r14, VCPU_GPR(R14)(r4)
ld r15, VCPU_GPR(R15)(r4)
ld r16, VCPU_GPR(R16)(r4)
ld r17, VCPU_GPR(R17)(r4)
ld r18, VCPU_GPR(R18)(r4)
ld r19, VCPU_GPR(R19)(r4)
ld r20, VCPU_GPR(R20)(r4)
ld r21, VCPU_GPR(R21)(r4)
ld r22, VCPU_GPR(R22)(r4)
ld r23, VCPU_GPR(R23)(r4)
ld r24, VCPU_GPR(R24)(r4)
ld r25, VCPU_GPR(R25)(r4)
ld r26, VCPU_GPR(R26)(r4)
ld r27, VCPU_GPR(R27)(r4)
ld r28, VCPU_GPR(R28)(r4)
ld r29, VCPU_GPR(R29)(r4)
ld r30, VCPU_GPR(R30)(r4)
ld r31, VCPU_GPR(R31)(r4)

BEGIN_FTR_SECTION
/* Switch DSCR to guest value */
@@ -547,21 +547,21 @@ fast_guest_return:
mtlr r5
mtcr r6

ld r0, VCPU_GPR(r0)(r4)
ld r1, VCPU_GPR(r1)(r4)
ld r2, VCPU_GPR(r2)(r4)
ld r3, VCPU_GPR(r3)(r4)
ld r5, VCPU_GPR(r5)(r4)
ld r6, VCPU_GPR(r6)(r4)
ld r7, VCPU_GPR(r7)(r4)
ld r8, VCPU_GPR(r8)(r4)
ld r9, VCPU_GPR(r9)(r4)
ld r10, VCPU_GPR(r10)(r4)
ld r11, VCPU_GPR(r11)(r4)
ld r12, VCPU_GPR(r12)(r4)
ld r13, VCPU_GPR(r13)(r4)

ld r4, VCPU_GPR(r4)(r4)
ld r0, VCPU_GPR(R0)(r4)
ld r1, VCPU_GPR(R1)(r4)
ld r2, VCPU_GPR(R2)(r4)
ld r3, VCPU_GPR(R3)(r4)
ld r5, VCPU_GPR(R5)(r4)
ld r6, VCPU_GPR(R6)(r4)
ld r7, VCPU_GPR(R7)(r4)
ld r8, VCPU_GPR(R8)(r4)
ld r9, VCPU_GPR(R9)(r4)
ld r10, VCPU_GPR(R10)(r4)
ld r11, VCPU_GPR(R11)(r4)
ld r12, VCPU_GPR(R12)(r4)
ld r13, VCPU_GPR(R13)(r4)

ld r4, VCPU_GPR(R4)(r4)

hrfid
b .
@@ -590,22 +590,22 @@ kvmppc_interrupt:

/* Save registers */

std r0, VCPU_GPR(r0)(r9)
std r1, VCPU_GPR(r1)(r9)
std r2, VCPU_GPR(r2)(r9)
std r3, VCPU_GPR(r3)(r9)
std r4, VCPU_GPR(r4)(r9)
std r5, VCPU_GPR(r5)(r9)
std r6, VCPU_GPR(r6)(r9)
std r7, VCPU_GPR(r7)(r9)
std r8, VCPU_GPR(r8)(r9)
std r0, VCPU_GPR(R0)(r9)
std r1, VCPU_GPR(R1)(r9)
std r2, VCPU_GPR(R2)(r9)
std r3, VCPU_GPR(R3)(r9)
std r4, VCPU_GPR(R4)(r9)
std r5, VCPU_GPR(R5)(r9)
std r6, VCPU_GPR(R6)(r9)
std r7, VCPU_GPR(R7)(r9)
std r8, VCPU_GPR(R8)(r9)
ld r0, HSTATE_HOST_R2(r13)
std r0, VCPU_GPR(r9)(r9)
std r10, VCPU_GPR(r10)(r9)
std r11, VCPU_GPR(r11)(r9)
std r0, VCPU_GPR(R9)(r9)
std r10, VCPU_GPR(R10)(r9)
std r11, VCPU_GPR(R11)(r9)
ld r3, HSTATE_SCRATCH0(r13)
lwz r4, HSTATE_SCRATCH1(r13)
std r3, VCPU_GPR(r12)(r9)
std r3, VCPU_GPR(R12)(r9)
stw r4, VCPU_CR(r9)

/* Restore R1/R2 so we can handle faults */
@@ -626,7 +626,7 @@ kvmppc_interrupt:

GET_SCRATCH0(r3)
mflr r4
std r3, VCPU_GPR(r13)(r9)
std r3, VCPU_GPR(R13)(r9)
std r4, VCPU_LR(r9)

/* Unset guest mode */
@@ -968,24 +968,24 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

/* Save non-volatile GPRs */
std r14, VCPU_GPR(r14)(r9)
std r15, VCPU_GPR(r15)(r9)
std r16, VCPU_GPR(r16)(r9)
std r17, VCPU_GPR(r17)(r9)
std r18, VCPU_GPR(r18)(r9)
std r19, VCPU_GPR(r19)(r9)
std r20, VCPU_GPR(r20)(r9)
std r21, VCPU_GPR(r21)(r9)
std r22, VCPU_GPR(r22)(r9)
std r23, VCPU_GPR(r23)(r9)
std r24, VCPU_GPR(r24)(r9)
std r25, VCPU_GPR(r25)(r9)
std r26, VCPU_GPR(r26)(r9)
std r27, VCPU_GPR(r27)(r9)
std r28, VCPU_GPR(r28)(r9)
std r29, VCPU_GPR(r29)(r9)
std r30, VCPU_GPR(r30)(r9)
std r31, VCPU_GPR(r31)(r9)
std r14, VCPU_GPR(R14)(r9)
std r15, VCPU_GPR(R15)(r9)
std r16, VCPU_GPR(R16)(r9)
std r17, VCPU_GPR(R17)(r9)
std r18, VCPU_GPR(R18)(r9)
std r19, VCPU_GPR(R19)(r9)
std r20, VCPU_GPR(R20)(r9)
std r21, VCPU_GPR(R21)(r9)
std r22, VCPU_GPR(R22)(r9)
std r23, VCPU_GPR(R23)(r9)
std r24, VCPU_GPR(R24)(r9)
std r25, VCPU_GPR(R25)(r9)
std r26, VCPU_GPR(R26)(r9)
std r27, VCPU_GPR(R27)(r9)
std r28, VCPU_GPR(R28)(r9)
std r29, VCPU_GPR(R29)(r9)
std r30, VCPU_GPR(R30)(r9)
std r31, VCPU_GPR(R31)(r9)

/* Save SPRGs */
mfspr r3, SPRN_SPRG0
@@ -1160,7 +1160,7 @@ kvmppc_hdsi:
andi. r0, r11, MSR_DR /* data relocation enabled? */
beq 3f
clrrdi r0, r4, 28
PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */
PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
bne 1f /* if no SLB entry found */
4: std r4, VCPU_FAULT_DAR(r9)
stw r6, VCPU_FAULT_DSISR(r9)
@@ -1234,7 +1234,7 @@ kvmppc_hisi:
andi. r0, r11, MSR_IR /* instruction relocation enabled? */
beq 3f
clrrdi r0, r10, 28
PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */
PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
bne 1f /* if no SLB entry found */
4:
/* Search the hash table. */
@@ -1278,7 +1278,7 @@ kvmppc_hisi:
*/
.globl hcall_try_real_mode
hcall_try_real_mode:
ld r3,VCPU_GPR(r3)(r9)
ld r3,VCPU_GPR(R3)(r9)
andi. r0,r11,MSR_PR
bne hcall_real_cont
clrrdi r3,r3,2
@@ -1291,12 +1291,12 @@ hcall_try_real_mode:
add r3,r3,r4
mtctr r3
mr r3,r9 /* get vcpu pointer */
ld r4,VCPU_GPR(r4)(r9)
ld r4,VCPU_GPR(R4)(r9)
bctrl
cmpdi r3,H_TOO_HARD
beq hcall_real_fallback
ld r4,HSTATE_KVM_VCPU(r13)
std r3,VCPU_GPR(r3)(r4)
std r3,VCPU_GPR(R3)(r4)
ld r10,VCPU_PC(r4)
ld r11,VCPU_MSR(r4)
b fast_guest_return
@@ -1424,7 +1424,7 @@ _GLOBAL(kvmppc_h_cede)
li r0,0 /* set trap to 0 to say hcall is handled */
stw r0,VCPU_TRAP(r3)
li r0,H_SUCCESS
std r0,VCPU_GPR(r3)(r3)
std r0,VCPU_GPR(R3)(r3)
BEGIN_FTR_SECTION
b 2f /* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
@@ -1443,7 +1443,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
addi r6,r5,VCORE_NAPPING_THREADS
31: lwarx r4,0,r6
or r4,r4,r0
PPC_POPCNTW(r7,r4)
PPC_POPCNTW(R7,R4)
cmpw r7,r8
bge 2f
stwcx. r4,0,r6
@@ -1464,24 +1464,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
* DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
*/
/* Save non-volatile GPRs */
std r14, VCPU_GPR(r14)(r3)
std r15, VCPU_GPR(r15)(r3)
std r16, VCPU_GPR(r16)(r3)
std r17, VCPU_GPR(r17)(r3)
std r18, VCPU_GPR(r18)(r3)
std r19, VCPU_GPR(r19)(r3)
std r20, VCPU_GPR(r20)(r3)
std r21, VCPU_GPR(r21)(r3)
std r22, VCPU_GPR(r22)(r3)
std r23, VCPU_GPR(r23)(r3)
std r24, VCPU_GPR(r24)(r3)
std r25, VCPU_GPR(r25)(r3)
std r26, VCPU_GPR(r26)(r3)
std r27, VCPU_GPR(r27)(r3)
std r28, VCPU_GPR(r28)(r3)
std r29, VCPU_GPR(r29)(r3)
std r30, VCPU_GPR(r30)(r3)
std r31, VCPU_GPR(r31)(r3)
std r14, VCPU_GPR(R14)(r3)
std r15, VCPU_GPR(R15)(r3)
std r16, VCPU_GPR(R16)(r3)
std r17, VCPU_GPR(R17)(r3)
std r18, VCPU_GPR(R18)(r3)
std r19, VCPU_GPR(R19)(r3)
std r20, VCPU_GPR(R20)(r3)
std r21, VCPU_GPR(R21)(r3)
std r22, VCPU_GPR(R22)(r3)
std r23, VCPU_GPR(R23)(r3)
std r24, VCPU_GPR(R24)(r3)
std r25, VCPU_GPR(R25)(r3)
std r26, VCPU_GPR(R26)(r3)
std r27, VCPU_GPR(R27)(r3)
std r28, VCPU_GPR(R28)(r3)
std r29, VCPU_GPR(R29)(r3)
std r30, VCPU_GPR(R30)(r3)
std r31, VCPU_GPR(R31)(r3)

/* save FP state */
bl .kvmppc_save_fp
@@ -1513,24 +1513,24 @@ kvm_end_cede:
bl kvmppc_load_fp

/* Load NV GPRS */
ld r14, VCPU_GPR(r14)(r4)
ld r15, VCPU_GPR(r15)(r4)
ld r16, VCPU_GPR(r16)(r4)
ld r17, VCPU_GPR(r17)(r4)
ld r18, VCPU_GPR(r18)(r4)
ld r19, VCPU_GPR(r19)(r4)
ld r20, VCPU_GPR(r20)(r4)
ld r21, VCPU_GPR(r21)(r4)
ld r22, VCPU_GPR(r22)(r4)
ld r23, VCPU_GPR(r23)(r4)
ld r24, VCPU_GPR(r24)(r4)
ld r25, VCPU_GPR(r25)(r4)
ld r26, VCPU_GPR(r26)(r4)
ld r27, VCPU_GPR(r27)(r4)
ld r28, VCPU_GPR(r28)(r4)
ld r29, VCPU_GPR(r29)(r4)
ld r30, VCPU_GPR(r30)(r4)
ld r31, VCPU_GPR(r31)(r4)
ld r14, VCPU_GPR(R14)(r4)
ld r15, VCPU_GPR(R15)(r4)
ld r16, VCPU_GPR(R16)(r4)
ld r17, VCPU_GPR(R17)(r4)
ld r18, VCPU_GPR(R18)(r4)
ld r19, VCPU_GPR(R19)(r4)
ld r20, VCPU_GPR(R20)(r4)
ld r21, VCPU_GPR(R21)(r4)
ld r22, VCPU_GPR(R22)(r4)
ld r23, VCPU_GPR(R23)(r4)
ld r24, VCPU_GPR(R24)(r4)
ld r25, VCPU_GPR(R25)(r4)
ld r26, VCPU_GPR(R26)(r4)
ld r27, VCPU_GPR(R27)(r4)
ld r28, VCPU_GPR(R28)(r4)
ld r29, VCPU_GPR(R29)(r4)
ld r30, VCPU_GPR(R30)(r4)
ld r31, VCPU_GPR(R31)(r4)

/* clear our bit in vcore->napping_threads */
33: ld r5,HSTATE_KVM_VCORE(r13)
@@ -1649,7 +1649,7 @@ BEGIN_FTR_SECTION
reg = 0
.rept 32
li r6,reg*16+VCPU_VSRS
STXVD2X(reg,r6,r3)
STXVD2X(reg,R6,R3)
reg = reg + 1
.endr
FTR_SECTION_ELSE
@@ -1711,7 +1711,7 @@ BEGIN_FTR_SECTION
reg = 0
.rept 32
li r7,reg*16+VCPU_VSRS
LXVD2X(reg,r7,r4)
LXVD2X(reg,R7,R4)
reg = reg + 1
.endr
FTR_SECTION_ELSE