powerpc/mm: Use the required number of VSID bits in slbmte
ASM_VSID_SCRAMBLE can leave non-zero bits in the high 28 bits of the result
for a 256MB segment (the high 40 bits for a 1T segment). Properly mask them
before using the values in slbmte.

Reviewed-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Aneesh Kumar K.V authored and Benjamin Herrenschmidt committed Sep 17, 2012
1 parent 7aa0727 commit ac8dc28
1 changed file: arch/powerpc/mm/slb_low.S (10 additions, 2 deletions)
@@ -223,7 +223,11 @@ _GLOBAL(slb_allocate_user)
  */
 slb_finish_load:
 	ASM_VSID_SCRAMBLE(r10,r9,256M)
-	rldimi	r11,r10,SLB_VSID_SHIFT,16	/* combine VSID and flags */
+	/*
+	 * bits above VSID_BITS_256M need to be ignored from r10
+	 * also combine VSID and flags
+	 */
+	rldimi	r11,r10,SLB_VSID_SHIFT,(64 - (SLB_VSID_SHIFT + VSID_BITS_256M))
 
 	/* r3 = EA, r11 = VSID data */
 	/*
@@ -287,7 +291,11 @@ _GLOBAL(slb_compare_rr_to_size)
 slb_finish_load_1T:
 	srdi	r10,r10,40-28		/* get 1T ESID */
 	ASM_VSID_SCRAMBLE(r10,r9,1T)
-	rldimi	r11,r10,SLB_VSID_SHIFT_1T,16	/* combine VSID and flags */
+	/*
+	 * bits above VSID_BITS_1T need to be ignored from r10
+	 * also combine VSID and flags
+	 */
+	rldimi	r11,r10,SLB_VSID_SHIFT_1T,(64 - (SLB_VSID_SHIFT_1T + VSID_BITS_1T))
 	li	r10,MMU_SEGSIZE_1T
 	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */
 
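How the new mask works: rldimi rA,rS,SH,MB rotates rS left by SH and inserts only bits MB through 63-SH of the rotated value (IBM numbering, bit 0 = MSB) into rA, leaving the remaining bits of rA untouched. Choosing MB = 64 - (SH + VSID_BITS) makes the inserted field exactly VSID_BITS wide, starting SH bits from the low end, so whatever ASM_VSID_SCRAMBLE leaves above VSID_BITS is discarded rather than copied into the SLB entry. Below is a minimal C sketch of the equivalent read-modify-write; insert_vsid is a hypothetical illustration, not kernel code, and the example values assume the 2012-era constants SLB_VSID_SHIFT = 12 and VSID_BITS_256M = 36.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical model of "rldimi rA,rS,shift,64-(shift+width)":
 * insert the low `width` bits of `vsid`, shifted left by `shift`,
 * into `slb_data`, leaving every other bit of `slb_data` alone.
 * Bits of the scrambled VSID at or above `width` fall outside the
 * field mask and are dropped -- the masking this commit spells out.
 */
static uint64_t insert_vsid(uint64_t slb_data, uint64_t vsid,
			    unsigned int shift, unsigned int width)
{
	uint64_t field = ((UINT64_C(1) << width) - 1) << shift;

	return (slb_data & ~field) | ((vsid << shift) & field);
}

int main(void)
{
	/* Scramble result with junk in the high 28 bits (assumed layout). */
	uint64_t scrambled_vsid = 0xabcd00000000000fULL;
	uint64_t flags = 0x490;	/* illustrative SLB flag bits only */

	/* Assuming SLB_VSID_SHIFT = 12 and VSID_BITS_256M = 36. */
	printf("%016llx\n", (unsigned long long)
	       insert_vsid(flags, scrambled_vsid, 12, 36));
	return 0;
}

With those assumed constants, 64 - (SLB_VSID_SHIFT + VSID_BITS_256M) still evaluates to the old hard-coded 16 (and likewise for the 1T case); the symbolic form makes the masking explicit and keeps it correct if VSID_BITS ever changes.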
