diff --git a/[refs] b/[refs]
index f677bfd8e457..670b6e26d044 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c605782b1c3f1c18a55dc1a75b19ed0288f61ac3
+refs/heads/master: a7d2dac802a7ff0677b0a5c2fdb9fe0d3fdaee0c
diff --git a/trunk/arch/powerpc/include/asm/pgtable-ppc32.h b/trunk/arch/powerpc/include/asm/pgtable-ppc32.h
index a9c6ecef3656..67ceffc01b43 100644
--- a/trunk/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/trunk/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -146,9 +146,29 @@ extern int icache_44x_need_flush;
 
 #define _PAGE_HPTEFLAGS _PAGE_HASHPTE
 
-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
-			 _PAGE_SPECIAL)
+/* Location of the PFN in the PTE. Most platforms use the same value as
+ * PAGE_SHIFT here (i.e., naturally aligned). Platforms that don't just
+ * pre-define the value themselves, so we don't override it here.
+ */
+#ifndef PTE_RPN_SHIFT
+#define PTE_RPN_SHIFT	(PAGE_SHIFT)
+#endif
+
+#ifdef CONFIG_PTE_64BIT
+#define PTE_RPN_MAX	(1ULL << (64 - PTE_RPN_SHIFT))
+#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
+#else
+#define PTE_RPN_MAX	(1UL << (32 - PTE_RPN_SHIFT))
+#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
+#endif
+
+/* _PAGE_CHG_MASK masks the bits that are to be preserved across a
+ * change of the protection (pgprot) bits.
+ */
+#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
+			 _PAGE_ACCESSED | _PAGE_SPECIAL)
+
 #define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
 #define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
 #define _PAGE_KERNEL	(_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE)
 #define _PAGE_IO	(_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)
@@ -305,9 +325,9 @@
 /*
  * Conversions between PTE values and page frame numbers.
  */
-#define pte_pfn(x)		(pte_val(x) >> PFN_SHIFT_OFFSET)
+#define pte_pfn(x)		(pte_val(x) >> PTE_RPN_SHIFT)
 #define pte_page(x)		pfn_to_page(pte_pfn(x))
-#define pfn_pte(pfn, prot)	__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) |\
+#define pfn_pte(pfn, prot)	__pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |\
 				pgprot_val(prot))
 #define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
 #endif /* __ASSEMBLY__ */
diff --git a/trunk/arch/powerpc/include/asm/pte-fsl-booke.h b/trunk/arch/powerpc/include/asm/pte-fsl-booke.h
index 0fe5de7bea3d..10820f58acf5 100644
--- a/trunk/arch/powerpc/include/asm/pte-fsl-booke.h
+++ b/trunk/arch/powerpc/include/asm/pte-fsl-booke.h
@@ -36,6 +36,8 @@
 #ifdef CONFIG_PTE_64BIT
 /* ERPN in a PTE never gets cleared, ignore it */
 #define _PTE_NONE_MASK	0xffffffffffff0000ULL
+/* We extend the size of the PTE flags area when using 64-bit PTEs */
+#define PTE_RPN_SHIFT	(PAGE_SHIFT + 8)
 #endif
 
 #define _PMD_PRESENT	0
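
For context on what the patch's new macros do, here is a minimal standalone C sketch; it is not part of the patch and not kernel code. It mimics how pfn_pte()/pte_pfn() pack and extract the page frame number once PTE_RPN_SHIFT can be larger than PAGE_SHIFT, using the values from the FSL BookE 64-bit-PTE case above (PAGE_SHIFT assumed to be 12, PTE_RPN_SHIFT = PAGE_SHIFT + 8); the mock function and variable names are illustrative only.

/*
 * Standalone sketch (hypothetical, not kernel code): shows how the PFN
 * is placed at PTE_RPN_SHIFT inside a 64-bit PTE, leaving 8 extra bits
 * of room for PTE flags below it, as in the pte-fsl-booke.h hunk.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT    12                        /* assumed 4K pages */
#define PTE_RPN_SHIFT (PAGE_SHIFT + 8)          /* 8 extra flag bits */
#define PTE_RPN_MASK  (~((1ULL << PTE_RPN_SHIFT) - 1))

typedef uint64_t pte_basic_t;                   /* CONFIG_PTE_64BIT case */

static pte_basic_t mock_pfn_pte(uint64_t pfn, pte_basic_t prot)
{
	/* Shift the PFN up past all flag bits, then OR in protection bits */
	return (pfn << PTE_RPN_SHIFT) | prot;
}

static uint64_t mock_pte_pfn(pte_basic_t pte)
{
	/* All flags live below PTE_RPN_SHIFT, so a shift discards them */
	return pte >> PTE_RPN_SHIFT;
}

int main(void)
{
	pte_basic_t pte = mock_pfn_pte(0x123456ULL, 0xA5);  /* arbitrary prot */

	assert(mock_pte_pfn(pte) == 0x123456ULL);
	/* PTE_RPN_MASK isolates the RPN field, i.e. everything but flags */
	assert(((pte & PTE_RPN_MASK) >> PTE_RPN_SHIFT) == 0x123456ULL);
	printf("pte=0x%016llx pfn=0x%llx\n",
	       (unsigned long long)pte, (unsigned long long)mock_pte_pfn(pte));
	return 0;
}

This also illustrates why the patch redefines _PAGE_CHG_MASK in terms of PTE_RPN_MASK rather than PAGE_MASK: with PTE_RPN_SHIFT > PAGE_SHIFT, PAGE_MASK would no longer cover the whole RPN field.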