Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 245681
b: refs/heads/master
c: 5510db9
h: refs/heads/master
i:
  245679: 9cd257e
v: v3
  • Loading branch information
Tejun Heo authored and H. Peter Anvin committed Apr 7, 2011
1 parent 02a875b commit 1cf3214
Show file tree
Hide file tree
Showing 2 changed files with 63 additions and 64 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 5b8443b25c0f323ec190d094e4b441957b02664e
refs/heads/master: 5510db9c1be111528ce46c57f0bec1c9dce258f4
125 changes: 62 additions & 63 deletions trunk/arch/x86/mm/numa_32.c
Original file line number Diff line number Diff line change
Expand Up @@ -264,70 +264,64 @@ void resume_map_numa_kva(pgd_t *pgd_base)
}
#endif

static __init unsigned long calculate_numa_remap_pages(void)
static __init unsigned long init_alloc_remap(int nid, unsigned long offset)
{
int nid;
unsigned long size, reserve_pages = 0;
unsigned long size;
u64 node_kva;

for_each_online_node(nid) {
u64 node_kva;

/*
* The acpi/srat node info can show hot-add memory zones
* where memory could be added but not currently present.
*/
printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
nid, node_start_pfn[nid], node_end_pfn[nid]);
if (node_start_pfn[nid] > max_pfn)
continue;
if (!node_end_pfn[nid])
continue;
if (node_end_pfn[nid] > max_pfn)
node_end_pfn[nid] = max_pfn;

/* ensure the remap includes space for the pgdat. */
size = node_remap_size[nid];
size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);

/* convert size to large (pmd size) pages, rounding up */
size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
/* now the roundup is correct, convert to PAGE_SIZE pages */
size = size * PTRS_PER_PTE;

node_kva = memblock_find_in_range(node_start_pfn[nid] << PAGE_SHIFT,
((u64)node_end_pfn[nid])<<PAGE_SHIFT,
((u64)size)<<PAGE_SHIFT,
LARGE_PAGE_BYTES);
if (node_kva == MEMBLOCK_ERROR)
panic("Can not get kva ram\n");

node_remap_size[nid] = size;
node_remap_offset[nid] = reserve_pages;
reserve_pages += size;
printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of"
" node %d at %llx\n",
size, nid, node_kva >> PAGE_SHIFT);

/*
* prevent kva address below max_low_pfn want it on system
* with less memory later.
* layout will be: KVA address , KVA RAM
*
* we are supposed to only record the one less than max_low_pfn
* but we could have some hole in high memory, and it will only
* check page_is_ram(pfn) && !page_is_reserved_early(pfn) to decide
* to use it as free.
* So memblock_x86_reserve_range here, hope we don't run out of that array
*/
memblock_x86_reserve_range(node_kva,
node_kva + (((u64)size)<<PAGE_SHIFT),
"KVA RAM");

node_remap_start_pfn[nid] = node_kva >> PAGE_SHIFT;
}
printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n",
reserve_pages);
return reserve_pages;
/*
* The acpi/srat node info can show hot-add memory zones where
* memory could be added but not currently present.
*/
printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
nid, node_start_pfn[nid], node_end_pfn[nid]);
if (node_start_pfn[nid] > max_pfn)
return 0;
if (!node_end_pfn[nid])
return 0;
if (node_end_pfn[nid] > max_pfn)
node_end_pfn[nid] = max_pfn;

/* ensure the remap includes space for the pgdat. */
size = node_remap_size[nid];
size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);

/* convert size to large (pmd size) pages, rounding up */
size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
/* now the roundup is correct, convert to PAGE_SIZE pages */
size = size * PTRS_PER_PTE;

node_kva = memblock_find_in_range(node_start_pfn[nid] << PAGE_SHIFT,
(u64)node_end_pfn[nid] << PAGE_SHIFT,
(u64)size << PAGE_SHIFT,
LARGE_PAGE_BYTES);
if (node_kva == MEMBLOCK_ERROR)
panic("Can not get kva ram\n");

node_remap_size[nid] = size;
node_remap_offset[nid] = offset;
printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of node %d at %llx\n",
size, nid, node_kva >> PAGE_SHIFT);

/*
* prevent kva address below max_low_pfn want it on system
* with less memory later.
* layout will be: KVA address , KVA RAM
*
* we are supposed to only record the one less than
* max_low_pfn but we could have some hole in high memory,
* and it will only check page_is_ram(pfn) &&
* !page_is_reserved_early(pfn) to decide to use it as free.
* So memblock_x86_reserve_range here, hope we don't run out
* of that array
*/
memblock_x86_reserve_range(node_kva,
node_kva + ((u64)size << PAGE_SHIFT),
"KVA RAM");

node_remap_start_pfn[nid] = node_kva >> PAGE_SHIFT;

return size;
}

static void init_remap_allocator(int nid)
Expand All @@ -346,6 +340,7 @@ static void init_remap_allocator(int nid)

void __init initmem_init(void)
{
unsigned long reserve_pages = 0;
int nid;

/*
Expand All @@ -359,7 +354,11 @@ void __init initmem_init(void)
get_memcfg_numa();
numa_init_array();

kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);
for_each_online_node(nid)
reserve_pages += init_alloc_remap(nid, reserve_pages);
kva_pages = roundup(reserve_pages, PTRS_PER_PTE);
printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n",
reserve_pages);

kva_start_pfn = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
max_low_pfn << PAGE_SHIFT,
Expand Down

0 comments on commit 1cf3214

Please sign in to comment.