Skip to content

Commit

Permalink
powerpc: Add hugepage support to 64-bit tablewalk code for FSL_BOOK3E
Browse files Browse the repository at this point in the history
Before hugetlb, at each level of the table we tested the entry for
!0 to determine whether we had a valid table entry.  With hugetlb, this
compare becomes:
        < 0 is a normal entry
        0 is an invalid entry
        > 0 is huge

This works because the hugepage code pulls the top bit off the entry
(which for non-huge entries always has the top bit set) as an
indicator that we have a hugepage.

Signed-off-by: Becky Bruce <beckyb@kernel.crashing.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
  • Loading branch information
Becky Bruce authored and Benjamin Herrenschmidt committed Dec 7, 2011
1 parent 27609a4 commit d1b9b12
Showing 1 changed file with 7 additions and 7 deletions.
14 changes: 7 additions & 7 deletions arch/powerpc/mm/tlb_low_64e.S
Original file line number Diff line number Diff line change
Expand Up @@ -136,22 +136,22 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
#ifndef CONFIG_PPC_64K_PAGES
rldicl r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
clrrdi r15,r15,3
cmpldi cr0,r14,0
beq tlb_miss_fault_bolted /* Bad pgd entry */
cmpdi cr0,r14,0
bge tlb_miss_fault_bolted /* Bad pgd entry or hugepage; bail */
ldx r14,r14,r15 /* grab pud entry */
#endif /* CONFIG_PPC_64K_PAGES */

rldicl r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
clrrdi r15,r15,3
cmpldi cr0,r14,0
beq tlb_miss_fault_bolted
cmpdi cr0,r14,0
bge tlb_miss_fault_bolted
ldx r14,r14,r15 /* Grab pmd entry */

rldicl r15,r16,64-PAGE_SHIFT+3,64-PTE_INDEX_SIZE-3
clrrdi r15,r15,3
cmpldi cr0,r14,0
beq tlb_miss_fault_bolted
ldx r14,r14,r15 /* Grab PTE */
cmpdi cr0,r14,0
bge tlb_miss_fault_bolted
ldx r14,r14,r15 /* Grab PTE, normal (!huge) page */

/* Check if required permissions are met */
andc. r15,r11,r14
Expand Down

0 comments on commit d1b9b12

Please sign in to comment.