[PATCH] Invert sense of SLB class bit

Currently, we set the class bit in kernel SLB entries, and clear it on
user SLB entries.  On POWER5, ERAT entries created in real mode have
the class bit clear.  So to avoid flushing kernel ERAT entries on each
context switch, this patch inverts our usage of the class bit, setting
it on user SLB entries and clearing it on kernel SLB entries.

Booted on POWER5 and G5.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
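
For reference, the class bit lives in two different encodings, both touched
below: SLB_VSID_C in the VSID word that slbmte writes, and SLBIE_C in the RB
operand of slbie.  A minimal sketch of the post-patch convention, using the
constants the diff defines (SID_SHIFT = 28, the 256MB segment size, is an
assumption here, and the helper name is hypothetical):

    /* Sketch, not part of the patch: the class bit in its two homes. */
    #define SLB_VSID_C	0x0000000000000080UL	/* class bit in slbmte VSID word */
    #define SLBIE_C	0x08000000UL		/* class bit in slbie RB operand */
    #define SID_SHIFT	28			/* 256MB segments (assumption) */

    /* After this patch, user SLB entries carry the class bit and kernel
     * entries leave it clear, matching the class of ERAT entries that
     * POWER5 creates in real mode, so kernel translations survive the
     * user-segment invalidations at context switch. */
    static inline unsigned long user_slbie_operand(unsigned long esid_index)
    {
    	return (esid_index << SID_SHIFT) | SLBIE_C;
    }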
David Gibson authored and Paul Mackerras committed Sep 6, 2005
1 parent 0fdf0b8 commit 14b3466
Showing 4 changed files with 15 additions and 12 deletions.
11 changes: 5 additions & 6 deletions arch/ppc64/kernel/entry.S
@@ -400,15 +400,14 @@ BEGIN_FTR_SECTION
 	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
 	cror	eq,4*cr1+eq,eq
 	beq	2f		/* if yes, don't slbie it */
-	oris	r0,r6,0x0800	/* set C (class) bit */
 
 	/* Bolt in the new stack SLB entry */
 	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
-	oris	r6,r6,(SLB_ESID_V)@h
-	ori	r6,r6,(SLB_NUM_BOLTED-1)@l
-	slbie	r0
-	slbie	r0		/* Workaround POWER5 < DD2.1 issue */
-	slbmte	r7,r6
+	oris	r0,r6,(SLB_ESID_V)@h
+	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
+	slbie	r6
+	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
+	slbmte	r7,r0
 	isync
 
 2:
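
With kernel entries now class 0, the raw ESID in r6 is exactly what slbie
needs, so r0 no longer holds a class-tagged copy and is free to assemble the
slbmte ESID word instead.  Rendered as C for readability (a sketch with
hypothetical variable names, not generated from the assembly):

    unsigned long esid = new_stack_esid;	/* r6: class bit clear = kernel */
    unsigned long vsid = new_stack_vsid;	/* r7: loaded from KSP_VSID(r4) */
    unsigned long rb   = esid | SLB_ESID_V | (SLB_NUM_BOLTED - 1);	/* r0 */

    asm volatile("slbie %0" : : "r" (esid));	/* drop the old translation */
    asm volatile("slbie %0" : : "r" (esid));	/* POWER5 < DD2.1 workaround */
    asm volatile("slbmte %0,%1" : : "r" (vsid), "r" (rb));
    asm volatile("isync" : : : "memory");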
6 changes: 4 additions & 2 deletions arch/ppc64/mm/hugetlbpage.c
@@ -144,7 +144,8 @@ static void flush_low_segments(void *parm)
 	for (i = 0; i < NUM_LOW_AREAS; i++) {
 		if (! (areas & (1U << i)))
 			continue;
-		asm volatile("slbie %0" : : "r" (i << SID_SHIFT));
+		asm volatile("slbie %0"
+			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
 	}
 
 	asm volatile("isync" : : : "memory");
@@ -164,7 +165,8 @@ static void flush_high_segments(void *parm)
 			continue;
 		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
 			asm volatile("slbie %0"
-				:: "r" ((i << HTLB_AREA_SHIFT) + (j << SID_SHIFT)));
+				:: "r" (((i << HTLB_AREA_SHIFT)
+					 + (j << SID_SHIFT)) | SLBIE_C));
 	}
 
 	asm volatile("isync" : : : "memory");
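
Both hunks flush segments in the user address range.  The class value given
to slbie has to match the class the entry was created with (it steers which
ERAT entries are cast out), so after the inversion every user-segment
invalidation must carry SLBIE_C.  The operand both loops build, in generic
form (hypothetical helper, assuming ea need not be segment-aligned):

    /* Sketch, not from the patch: invalidate the user segment holding ea. */
    static inline void slbie_user_ea(unsigned long ea)
    {
    	unsigned long rb = (ea & ~((1UL << SID_SHIFT) - 1)) | SLBIE_C;

    	asm volatile("slbie %0" : : "r" (rb));
    }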
4 changes: 2 additions & 2 deletions arch/ppc64/mm/slb.c
@@ -87,8 +87,8 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 		int i;
 		asm volatile("isync" : : : "memory");
 		for (i = 0; i < offset; i++) {
-			esid_data = (unsigned long)get_paca()->slb_cache[i]
-				<< SID_SHIFT;
+			esid_data = ((unsigned long)get_paca()->slb_cache[i]
+				<< SID_SHIFT) | SLBIE_C;
 			asm volatile("slbie %0" : : "r" (esid_data));
 		}
 		asm volatile("isync" : : : "memory");
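
This loop is the context-switch path the commit message refers to: each
slbie also casts out ERAT entries of the matching class, which is why kernel
entries are better off in the other class.  One iteration, unrolled (reading
slb_cache[] as an array of user-segment ESID indexes is an assumption about
the surrounding code):

    unsigned long index = (unsigned long)get_paca()->slb_cache[i];
    unsigned long esid_data = (index << SID_SHIFT) | SLBIE_C;	/* user class */
    asm volatile("slbie %0" : : "r" (esid_data));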
6 changes: 4 additions & 2 deletions include/asm-ppc64/mmu.h
@@ -54,8 +54,10 @@ extern char initial_stab[];
 #define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
 #define SLB_VSID_LS		ASM_CONST(0x0000000000000070) /* size of largepage */
 
-#define SLB_VSID_KERNEL	(SLB_VSID_KP|SLB_VSID_C)
-#define SLB_VSID_USER	(SLB_VSID_KP|SLB_VSID_KS)
+#define SLB_VSID_KERNEL	(SLB_VSID_KP)
+#define SLB_VSID_USER	(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
+
+#define SLBIE_C			(0x08000000)
 
 /*
  * Hash table
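
Two constants for one bit because slbmte's RS operand and slbie's RB operand
place the class bit differently: in IBM bit numbering (bit 0 = MSB of the
64-bit word) SLB_VSID_C sits at bit 56 and SLBIE_C at bit 36, an assumption
drawn from the Power architecture rather than stated in the patch.  A
standalone self-check:

    #include <assert.h>

    int main(void)
    {
    	assert((1UL << (63 - 56)) == 0x0000000000000080UL);	/* SLB_VSID_C */
    	assert((1UL << (63 - 36)) == 0x0000000008000000UL);	/* SLBIE_C */
    	return 0;
    }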
