Commit 33710ff

---
r: 105319
b: refs/heads/master
c: 0d9ea75
h: refs/heads/master
i:
  105317: a11c2e3
  105315: 03ffe41
  105311: 97e2260
v: v3
Jon Tollefson authored and Linus Torvalds committed Jul 24, 2008
1 parent e39835b commit 33710ff
Showing 10 changed files with 200 additions and 119 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f4a67cceee4a6f5ed38011a698c9e34747270ae5
+refs/heads/master: 0d9ea75443dc7e37843e656b8ebc947a6d16d618
10 changes: 5 additions & 5 deletions trunk/Documentation/kernel-parameters.txt
@@ -776,11 +776,11 @@ and is between 256 and 4096 characters. It is defined in the file

 	hugepages=	[HW,X86-32,IA-64] HugeTLB pages to allocate at boot.
 	hugepagesz=	[HW,IA-64,PPC,X86-64] The size of the HugeTLB pages.
-			On x86 this option can be specified multiple times
-			interleaved with hugepages= to reserve huge pages
-			of different sizes. Valid pages sizes on x86-64
-			are 2M (when the CPU supports "pse") and 1G (when the
-			CPU supports the "pdpe1gb" cpuinfo flag)
+			On x86-64 and powerpc, this option can be specified
+			multiple times interleaved with hugepages= to reserve
+			huge pages of different sizes. Valid pages sizes on
+			x86-64 are 2M (when the CPU supports "pse") and 1G
+			(when the CPU supports the "pdpe1gb" cpuinfo flag)
 			Note that 1GB pages can only be allocated at boot time
 			using hugepages= and not freed afterwards.
 	default_hugepagesz=
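As a usage illustration (not part of the patch), the interleaving described above would look roughly like this on an x86-64 kernel command line, assuming a CPU that advertises both "pse" and "pdpe1gb" and with arbitrary example counts:

	hugepagesz=2M hugepages=512 hugepagesz=1G hugepages=2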
9 changes: 4 additions & 5 deletions trunk/arch/powerpc/mm/hash_utils_64.c
@@ -103,7 +103,6 @@ int mmu_kernel_ssize = MMU_SEGSIZE_256M;
 int mmu_highuser_ssize = MMU_SEGSIZE_256M;
 u16 mmu_slb_size = 64;
 #ifdef CONFIG_HUGETLB_PAGE
-int mmu_huge_psize = MMU_PAGE_16M;
 unsigned int HPAGE_SHIFT;
 #endif
 #ifdef CONFIG_PPC_64K_PAGES
@@ -460,15 +459,15 @@ static void __init htab_init_page_sizes(void)
 	/* Reserve 16G huge page memory sections for huge pages */
 	of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
 
-	/* Init large page size. Currently, we pick 16M or 1M depending
+	/* Set default large page size. Currently, we pick 16M or 1M depending
 	 * on what is available
 	 */
 	if (mmu_psize_defs[MMU_PAGE_16M].shift)
-		set_huge_psize(MMU_PAGE_16M);
+		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
 	/* With 4k/4level pagetables, we can't (for now) cope with a
 	 * huge page size < PMD_SIZE */
 	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
-		set_huge_psize(MMU_PAGE_1M);
+		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
 #endif /* CONFIG_HUGETLB_PAGE */
 }

@@ -889,7 +888,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)

 #ifdef CONFIG_HUGETLB_PAGE
 	/* Handle hugepage regions */
-	if (HPAGE_SHIFT && psize == mmu_huge_psize) {
+	if (HPAGE_SHIFT && mmu_huge_psizes[psize]) {
 		DBG_LOW(" -> huge page !\n");
 		return hash_huge_page(mm, access, ea, vsid, local, trap);
 	}
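To make the shape of the change concrete, here is a minimal standalone C sketch (not the kernel source; the enum values, shifts, and the set_huge_psize() body are simplified assumptions): the single global mmu_huge_psize gives way to a per-size flag array mmu_huge_psizes[], set_huge_psize() marks each size the user asks for via hugepagesz=, HPAGE_SHIFT only records the default size chosen at hash-table init, and the fault path accepts any flagged size as a huge page.

	/*
	 * Standalone sketch of the idea in the two hunks above -- NOT the
	 * kernel code.  Enum values, shifts, and the set_huge_psize() body
	 * are simplified assumptions for illustration only.
	 */
	#include <stdio.h>

	enum {
		MMU_PAGE_4K,
		MMU_PAGE_64K,
		MMU_PAGE_1M,
		MMU_PAGE_16M,
		MMU_PAGE_16G,
		MMU_PAGE_COUNT
	};

	struct mmu_psize_def { unsigned int shift; };	/* shift == 0: size unsupported */

	static struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
		[MMU_PAGE_4K]  = { .shift = 12 },
		[MMU_PAGE_16M] = { .shift = 24 },	/* pretend 16M pages exist */
		[MMU_PAGE_16G] = { .shift = 34 },	/* ...and 16G pages too */
	};

	/* One flag per page size instead of a single global mmu_huge_psize. */
	static unsigned int mmu_huge_psizes[MMU_PAGE_COUNT];
	static unsigned int HPAGE_SHIFT;

	/* Called once per size requested (e.g. from hugepagesz= parsing). */
	static void set_huge_psize(int psize)
	{
		if (mmu_psize_defs[psize].shift)
			mmu_huge_psizes[psize] = 1;
	}

	/* Mirrors the first hunk: pick a default size, preferring 16M over 1M. */
	static void pick_default_hugepage_size(void)
	{
		if (mmu_psize_defs[MMU_PAGE_16M].shift)
			HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
		else if (mmu_psize_defs[MMU_PAGE_1M].shift)
			HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
	}

	/* Mirrors the second hunk: any flagged size takes the hugepage path. */
	static int is_huge(int psize)
	{
		return HPAGE_SHIFT && mmu_huge_psizes[psize];
	}

	int main(void)
	{
		pick_default_hugepage_size();
		set_huge_psize(MMU_PAGE_16M);	/* as if booted with hugepagesz=16M */
		set_huge_psize(MMU_PAGE_16G);	/* ...and hugepagesz=16G */

		printf("default HPAGE_SHIFT=%u 16M:%d 16G:%d 1M:%d\n",
		       HPAGE_SHIFT, is_huge(MMU_PAGE_16M),
		       is_huge(MMU_PAGE_16G), is_huge(MMU_PAGE_1M));
		return 0;
	}

The design point is exactly that indexing: a lookup keyed by the page-size index replaces an equality test against one privileged size, so several huge page sizes can be live at once.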
[Diffs for the remaining 7 changed files are not shown here.]
