powerpc/mm: Remove the dependency on pte bit position in asm code
We should not depend on PTE bit positions in asm code. Remove that
dependency by moving this part of the code to C.

Acked-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Aneesh Kumar K.V authored and Michael Ellerman committed Dec 14, 2015
1 parent 91f1da9 commit 106713a
Showing 2 changed files with 33 additions and 14 deletions.
18 changes: 4 additions & 14 deletions arch/powerpc/kernel/exceptions-64s.S
@@ -1556,29 +1556,19 @@ do_hash_page:
 	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
 	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
 	bne	77f			/* then don't call hash_page now */
-	/*
-	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
-	 * accessing a userspace segment (even from the kernel). We assume
-	 * kernel addresses always have the high bit set.
-	 */
-	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
-	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
-	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
-	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
-	ori	r4,r4,1			/* add _PAGE_PRESENT */
-	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
 
 	/*
 	 * r3 contains the faulting address
-	 * r4 contains the required access permissions
+	 * r4 msr
 	 * r5 contains the trap number
+	 * r6 contains dsisr
 	 *
 	 * at return r3 = 0 for success, 1 for page fault, negative for error
 	 */
+	mr	r4,r12
+	ld	r6,_DSISR(r1)
-	bl	hash_page		/* build HPTE if possible */
-	cmpdi	r3,0			/* see if hash_page succeeded */
+	bl	__hash_page		/* build HPTE if possible */
+	cmpdi	r3,0			/* see if __hash_page succeeded */
 
 	/* Success */
 	beq	fast_exc_return_irq	/* Return from exception on success */
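
For readers who do not speak rlwinm: each deleted rotate-and-mask instruction
encodes a PTE bit position directly in its shift and mask operands. Below is a
minimal userspace sketch (illustrative only, not kernel code) of what
`rlwinm r4,r4,32-25+9,31-9,31-9` computed: it rotates the DSISR store-fault
bit into the position of _PAGE_RW, which only works if _PAGE_RW really is
bit 9 (0x200, which matches the hash PTE layout of the time). Change the PTE
layout and the instruction silently produces the wrong flag.

#include <stdint.h>
#include <stdio.h>

/* Mimic rlwinm: rotate the low 32 bits of rs left by sh, then AND with a
 * mask covering IBM-numbered bits mb..me (bit 0 = MSB, bit 31 = LSB). */
static uint32_t rlwinm(uint32_t rs, unsigned int sh,
		       unsigned int mb, unsigned int me)
{
	uint32_t rot = (rs << sh) | (sh ? rs >> (32 - sh) : 0);
	uint32_t mask = 0;
	unsigned int i;

	for (i = mb; i <= me; i++)
		mask |= 1u << (31 - i);
	return rot & mask;
}

int main(void)
{
	uint32_t dsisr = 1u << 25;	/* the DSISR store bit, per the asm comment */

	/*
	 * rlwinm r4,r4,32-25+9,31-9,31-9: rotate the store bit from
	 * position 25 into position 9 and isolate it -- i.e. the operands
	 * hard-code that _PAGE_RW lives at bit 9.
	 */
	uint32_t access = rlwinm(dsisr, 32 - 25 + 9, 31 - 9, 31 - 9);

	printf("access = %#x\n", (unsigned int)access);	/* prints 0x200 */
	return 0;
}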
29 changes: 29 additions & 0 deletions arch/powerpc/mm/hash_utils_64.c
@@ -1206,6 +1206,35 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
 }
 EXPORT_SYMBOL_GPL(hash_page);
 
+int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap,
+		unsigned long dsisr)
+{
+	unsigned long access = _PAGE_PRESENT;
+	unsigned long flags = 0;
+	struct mm_struct *mm = current->mm;
+
+	if (REGION_ID(ea) == VMALLOC_REGION_ID)
+		mm = &init_mm;
+
+	if (dsisr & DSISR_NOHPTE)
+		flags |= HPTE_NOHPTE_UPDATE;
+
+	if (dsisr & DSISR_ISSTORE)
+		access |= _PAGE_RW;
+	/*
+	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
+	 * accessing a userspace segment (even from the kernel). We assume
+	 * kernel addresses always have the high bit set.
+	 */
+	if ((msr & MSR_PR) || (REGION_ID(ea) == USER_REGION_ID))
+		access |= _PAGE_USER;
+
+	if (trap == 0x400)
+		access |= _PAGE_EXEC;
+
+	return hash_page_mm(mm, ea, access, trap, flags);
+}
+
 void hash_preload(struct mm_struct *mm, unsigned long ea,
 		  unsigned long access, unsigned long trap)
 {
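
The same translation, now in C, can be exercised outside the kernel. The
sketch below compiles stand-alone: the constants are stand-ins copied for
illustration (the real definitions live in the powerpc headers), and the
`REGION_ID(ea) == USER_REGION_ID` check is reduced to a `user_region` flag.
The point of the commit is visible here: the logic references only the
_PAGE_* names, so the PTE bits can move without touching this code.

#include <stdio.h>

/* Stand-in values for illustration only; the real definitions come from
 * the powerpc headers, and this code works no matter what they are. */
#define _PAGE_PRESENT	0x0001
#define _PAGE_USER	0x0002
#define _PAGE_EXEC	0x0004
#define _PAGE_RW	0x0200
#define DSISR_ISSTORE	0x02000000
#define MSR_PR		0x4000

/* Mirrors __hash_page()'s flag translation; the region lookup and the
 * hash_page_mm() call are stubbed out. */
static unsigned long fault_access(unsigned long msr, unsigned long trap,
				  unsigned long dsisr, int user_region)
{
	unsigned long access = _PAGE_PRESENT;

	if (dsisr & DSISR_ISSTORE)
		access |= _PAGE_RW;
	if ((msr & MSR_PR) || user_region)
		access |= _PAGE_USER;
	if (trap == 0x400)	/* instruction storage interrupt */
		access |= _PAGE_EXEC;
	return access;
}

int main(void)
{
	/* A userspace store fault (trap 0x300): PRESENT | RW | USER. */
	printf("%#lx\n", fault_access(MSR_PR, 0x300, DSISR_ISSTORE, 1));
	/* A userspace instruction fetch fault: PRESENT | USER | EXEC. */
	printf("%#lx\n", fault_access(MSR_PR, 0x400, 0, 1));
	return 0;
}

Built with any C compiler, this prints 0x203 for the store fault and 0x7 for
the instruction fetch fault, matching the flags the old asm computed by hand.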
