Commit fe45ab7

---
yaml
---
r: 105316
b: refs/heads/master
c: 658013e
h: refs/heads/master
v: v3
Jon Tollefson authored and Linus Torvalds committed Jul 24, 2008
1 parent 03ffe41 commit fe45ab7
Showing 4 changed files with 62 additions and 2 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: ec4b2c0c8312d1118c2acd00c89988ecf955d5cc
+refs/heads/master: 658013e93eb70494f7300bc90457b09a807232a4
44 changes: 43 additions & 1 deletion trunk/arch/powerpc/mm/hash_utils_64.c
@@ -68,6 +68,7 @@
 
 #define KB (1024)
 #define MB (1024*KB)
+#define GB (1024L*MB)
 
 /*
  * Note: pte --> Linux PTE
@@ -329,6 +330,44 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
         return 0;
 }
 
+/* Scan for 16G memory blocks that have been set aside for huge pages
+ * and reserve those blocks for 16G huge pages.
+ */
+static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
+                                        const char *uname, int depth,
+                                        void *data) {
+        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+        unsigned long *addr_prop;
+        u32 *page_count_prop;
+        unsigned int expected_pages;
+        long unsigned int phys_addr;
+        long unsigned int block_size;
+
+        /* We are scanning "memory" nodes only */
+        if (type == NULL || strcmp(type, "memory") != 0)
+                return 0;
+
+        /* This property is the log base 2 of the number of virtual pages that
+         * will represent this memory block. */
+        page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
+        if (page_count_prop == NULL)
+                return 0;
+        expected_pages = (1 << page_count_prop[0]);
+        addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
+        if (addr_prop == NULL)
+                return 0;
+        phys_addr = addr_prop[0];
+        block_size = addr_prop[1];
+        if (block_size != (16 * GB))
+                return 0;
+        printk(KERN_INFO "Huge page(16GB) memory: "
+                        "addr = 0x%lX size = 0x%lX pages = %d\n",
+                        phys_addr, block_size, expected_pages);
+        lmb_reserve(phys_addr, block_size * expected_pages);
+        add_gpage(phys_addr, block_size, expected_pages);
+        return 0;
+}
+
 static void __init htab_init_page_sizes(void)
 {
         int rc;
@@ -418,7 +457,10 @@ static void __init htab_init_page_sizes(void)
         );
 
 #ifdef CONFIG_HUGETLB_PAGE
-        /* Init large page size. Currently, we pick 16M or 1M depending
+        /* Reserve 16G huge page memory sections for huge pages */
+        of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
+
+        /* Init large page size. Currently, we pick 16M or 1M depending
          * on what is available
          */
         if (mmu_psize_defs[MMU_PAGE_16M].shift)
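
For a concrete sense of the arithmetic in htab_dt_scan_hugepage_blocks(), here is a minimal stand-alone C sketch with made-up device-tree values (the address and the "ibm,expected#pages" exponent are assumptions, not real firmware data); only the 1 << log2 and block_size * expected_pages steps mirror the code above.

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical flattened-device-tree values: a memory node whose
             * "reg" size is one 16G block and whose "ibm,expected#pages"
             * property holds log2(pages) = 2, i.e. four 16G pages. */
            unsigned long long gb = 1024ULL * 1024 * 1024;
            unsigned long long phys_addr = 0x400000000ULL;  /* assumed address */
            unsigned long long block_size = 16 * gb;        /* must be exactly 16G */
            unsigned int log2_pages = 2;                    /* assumed exponent */
            unsigned int expected_pages = 1U << log2_pages;

            /* The kernel reserves block_size * expected_pages bytes via
             * lmb_reserve() and records each page with add_gpage(). */
            printf("reserve 0x%llx bytes at 0x%llx (%u pages of 16G)\n",
                   block_size * expected_pages, phys_addr, expected_pages);
            return 0;
    }

In the kernel code, memory nodes whose "reg" size is not exactly 16G are skipped, so only blocks set aside for 16G pages are reserved.
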
16 changes: 16 additions & 0 deletions trunk/arch/powerpc/mm/hugetlbpage.c
@@ -110,6 +110,22 @@ pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr)
 }
 #endif
 
+/* Build list of addresses of gigantic pages. This function is used in early
+ * boot before the buddy or bootmem allocator is setup.
+ */
+void add_gpage(unsigned long addr, unsigned long page_size,
+                unsigned long number_of_pages)
+{
+        if (!addr)
+                return;
+        while (number_of_pages > 0) {
+                gpage_freearray[nr_gpages] = addr;
+                nr_gpages++;
+                number_of_pages--;
+                addr += page_size;
+        }
+}
+
 /* Moves the gigantic page addresses from the temporary list to the
  * huge_boot_pages list. */
 int alloc_bootmem_huge_page(struct hstate *h)
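
The add_gpage() bookkeeping above can be exercised in isolation. The following user-space sketch mimics it with a fixed-size array; MAX_NUMBER_GPAGES, the sample address, and the explicit bounds check are assumptions added for the sketch, not part of the commit.

    #include <stdio.h>

    #define MAX_NUMBER_GPAGES 128   /* assumed capacity for the sketch */

    static unsigned long long gpage_freearray[MAX_NUMBER_GPAGES];
    static unsigned int nr_gpages;

    /* Record the start address of each gigantic page in a simple free array,
     * bounds-checked so the sketch cannot overrun. */
    static void add_gpage(unsigned long long addr, unsigned long long page_size,
                          unsigned long number_of_pages)
    {
            if (!addr)
                    return;
            while (number_of_pages > 0 && nr_gpages < MAX_NUMBER_GPAGES) {
                    gpage_freearray[nr_gpages++] = addr;
                    number_of_pages--;
                    addr += page_size;
            }
    }

    int main(void)
    {
            unsigned int i;

            /* Two 16G pages starting at a made-up physical address. */
            add_gpage(0x400000000ULL, 16ULL << 30, 2);
            for (i = 0; i < nr_gpages; i++)
                    printf("gpage[%u] = 0x%llx\n", i, gpage_freearray[i]);
            return 0;
    }

The addresses collected this way are consumed later by alloc_bootmem_huge_page(), which moves them from this temporary list onto the huge_boot_pages list once the allocators are up.
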
2 changes: 2 additions & 0 deletions trunk/include/asm-powerpc/mmu-hash64.h
@@ -281,6 +281,8 @@ extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
                         unsigned long pstart, unsigned long mode,
                         int psize, int ssize);
 extern void set_huge_psize(int psize);
+extern void add_gpage(unsigned long addr, unsigned long page_size,
+                      unsigned long number_of_pages);
 extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
 
 extern void htab_initialize(void);
