diff --git a/[refs] b/[refs]
index b6a062df71ad..efc21d8d3e93 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 025d7917a5ede982a5669c6735ef73a227b9827e
+refs/heads/master: 00df438e89a9003895948170e1abf64dd4665872
diff --git a/trunk/arch/powerpc/mm/hugetlbpage.c b/trunk/arch/powerpc/mm/hugetlbpage.c
index ed0aab0208a6..f1c2d55b4377 100644
--- a/trunk/arch/powerpc/mm/hugetlbpage.c
+++ b/trunk/arch/powerpc/mm/hugetlbpage.c
@@ -736,14 +736,21 @@ static int __init hugetlbpage_init(void)
 	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
 		return -ENODEV;
 
+
 	/* Add supported huge page sizes.  Need to change HUGE_MAX_HSTATE
 	 * and adjust PTE_NONCACHE_NUM if the number of supported huge page
 	 * sizes changes.
 	 */
 	set_huge_psize(MMU_PAGE_16M);
-	set_huge_psize(MMU_PAGE_64K);
 	set_huge_psize(MMU_PAGE_16G);
 
+	/* Temporarily disable support for 64K huge pages when 64K SPU local
+	 * store support is enabled as the current implementation conflicts.
+	 */
+#ifndef CONFIG_SPU_FS_64K_LS
+	set_huge_psize(MMU_PAGE_64K);
+#endif
+
 	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
 		if (mmu_huge_psizes[psize]) {
 			huge_pgtable_cache(psize) = kmem_cache_create(
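
For readability, a minimal C sketch of how the huge page size registration in hugetlbpage_init() reads once the hunk above is applied. It only restates the context and added lines visible in the hunk (set_huge_psize, MMU_PAGE_*, CPU_FTR_16M_PAGE and CONFIG_SPU_FS_64K_LS all come from the diff); the rest of the function is elided, not shown here.

	/* Sketch reconstructed from the hunk above; surrounding code elided. */
	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -ENODEV;

	/* Huge page sizes that are always registered. */
	set_huge_psize(MMU_PAGE_16M);
	set_huge_psize(MMU_PAGE_16G);

	/* 64K huge pages are registered only when 64K SPU local store
	 * support (CONFIG_SPU_FS_64K_LS) is not configured, since the
	 * two implementations currently conflict.
	 */
#ifndef CONFIG_SPU_FS_64K_LS
	set_huge_psize(MMU_PAGE_64K);
#endif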