Commit 3e92b6e

---
r: 119467
b: refs/heads/master
c: 4a61866
h: refs/heads/master
i:
  119465: 610c9d8
  119463: 78b31d5
v: v3
Dave Hansen authored and Paul Mackerras committed Nov 30, 2008
1 parent 956819f commit 3e92b6e
Showing 2 changed files with 76 additions and 48 deletions.
[refs]: 2 changes (1 addition, 1 deletion)
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 4b824de9b18b8d1013e9fc9e4b0f855ced8cac2c
+refs/heads/master: 4a6186696e7f15b3ea4dafcdb64ee0703e0e4487
trunk/arch/powerpc/mm/numa.c: 122 changes (75 additions, 47 deletions)
@@ -865,6 +865,67 @@ static struct notifier_block __cpuinitdata ppc64_numa_nb = {
 	.priority = 1 /* Must run before sched domains notifier. */
 };
 
+static void mark_reserved_regions_for_nid(int nid)
+{
+	struct pglist_data *node = NODE_DATA(nid);
+	int i;
+
+	for (i = 0; i < lmb.reserved.cnt; i++) {
+		unsigned long physbase = lmb.reserved.region[i].base;
+		unsigned long size = lmb.reserved.region[i].size;
+		unsigned long start_pfn = physbase >> PAGE_SHIFT;
+		unsigned long end_pfn = ((physbase + size) >> PAGE_SHIFT);
+		struct node_active_region node_ar;
+		unsigned long node_end_pfn = node->node_start_pfn +
+					     node->node_spanned_pages;
+
+		/*
+		 * Check to make sure that this lmb.reserved area is
+		 * within the bounds of the node that we care about.
+		 * Checking the nid of the start and end points is not
+		 * sufficient because the reserved area could span the
+		 * entire node.
+		 */
+		if (end_pfn <= node->node_start_pfn ||
+		    start_pfn >= node_end_pfn)
+			continue;
+
+		get_node_active_region(start_pfn, &node_ar);
+		while (start_pfn < end_pfn &&
+		       node_ar.start_pfn < node_ar.end_pfn) {
+			unsigned long reserve_size = size;
+			/*
+			 * if reserved region extends past active region
+			 * then trim size to active region
+			 */
+			if (end_pfn > node_ar.end_pfn)
+				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
+					- (start_pfn << PAGE_SHIFT);
+			dbg("reserve_bootmem %lx %lx nid=%d\n", physbase,
+				reserve_size, node_ar.nid);
+			reserve_bootmem_node(NODE_DATA(node_ar.nid), physbase,
+				reserve_size, BOOTMEM_DEFAULT);
+			/*
+			 * if reserved region is contained in the active region
+			 * then done.
+			 */
+			if (end_pfn <= node_ar.end_pfn)
+				break;
+
+			/*
+			 * reserved region extends past the active region
+			 * get next active region that contains this
+			 * reserved region
+			 */
+			start_pfn = node_ar.end_pfn;
+			physbase = start_pfn << PAGE_SHIFT;
+			size = size - reserve_size;
+			get_node_active_region(start_pfn, &node_ar);
+		}
+	}
+}
+
+
 void __init do_init_bootmem(void)
 {
 	int nid;
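
The heart of the new mark_reserved_regions_for_nid() above is the while loop that walks one lmb reserved range across however many active regions it touches, trimming each reserve_bootmem_node() call to the region that contains the current start_pfn. Below is a minimal standalone sketch of just that clipping logic: struct active_region, lookup_active() and mark_reserved() are made-up stand-ins for the kernel's node_active_region, get_node_active_region() and the loop body, and PAGE_SHIFT is assumed to be 12 (4K pages).

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4K pages */

/* Made-up stand-in for the kernel's struct node_active_region. */
struct active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};

/* Made-up stand-in for get_node_active_region(): return the active
 * region containing pfn, or an empty region so the caller's loop
 * terminates. */
static struct active_region lookup_active(unsigned long pfn,
		const struct active_region *tbl, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (pfn >= tbl[i].start_pfn && pfn < tbl[i].end_pfn)
			return tbl[i];
	return (struct active_region){ 0, 0, -1 };
}

/* The clipping walk from mark_reserved_regions_for_nid(), with
 * reserve_bootmem_node() replaced by a printf(). */
static void mark_reserved(unsigned long physbase, unsigned long size,
		const struct active_region *tbl, int n)
{
	unsigned long start_pfn = physbase >> PAGE_SHIFT;
	unsigned long end_pfn = (physbase + size) >> PAGE_SHIFT;
	struct active_region ar = lookup_active(start_pfn, tbl, n);

	while (start_pfn < end_pfn && ar.start_pfn < ar.end_pfn) {
		unsigned long reserve_size = size;

		/* Reserved range runs past this active region: trim
		 * the piece to the region's end. */
		if (end_pfn > ar.end_pfn)
			reserve_size = (ar.end_pfn << PAGE_SHIFT) -
				       (start_pfn << PAGE_SHIFT);

		printf("reserve [%#lx, %#lx) on nid=%d\n",
		       physbase, physbase + reserve_size, ar.nid);

		/* Piece fully contained in this region: done. */
		if (end_pfn <= ar.end_pfn)
			break;

		/* Otherwise move on to the region holding the rest. */
		start_pfn = ar.end_pfn;
		physbase = start_pfn << PAGE_SHIFT;
		size -= reserve_size;
		ar = lookup_active(start_pfn, tbl, n);
	}
}

int main(void)
{
	/* Two active regions on different nodes; the reserved range
	 * below crosses the boundary at pfn 3. */
	struct active_region tbl[] = {
		{ 1, 3, 0 },	/* pfns 1-2 live on node 0 */
		{ 3, 8, 1 },	/* pfns 3-7 live on node 1 */
	};

	mark_reserved(0x1000, 0x4000, tbl, 2);
	return 0;
}

Fed a reserved range that crosses the boundary between the two nodes' regions, the sketch emits one reservation per region ([0x1000, 0x3000) on nid 0, then [0x3000, 0x5000) on nid 1), mirroring how the kernel code splits a reservation that spans nodes.
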
@@ -890,7 +951,13 @@ void __init do_init_bootmem(void)
 
 		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
 
-		/* Allocate the node structure node local if possible */
+		/*
+		 * Allocate the node structure node local if possible
+		 *
+		 * Be careful moving this around, as it relies on all
+		 * previous nodes' bootmem to be initialized and have
+		 * all reserved areas marked.
+		 */
 		NODE_DATA(nid) = careful_allocation(nid,
 					sizeof(struct pglist_data),
 					SMP_CACHE_BYTES, end_pfn);
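
The comment added in this hunk states the ordering constraint the patch revolves around: by the time careful_allocation() runs for a node, every previously initialized node must already have its reserved areas marked, because (per the comment) an allocation that cannot be satisfied node-locally falls back to an earlier node's bootmem. A toy model of why the marking has to come first; reserve() and alloc_page_node0() are invented for illustration, not kernel APIs:

#include <stdbool.h>
#include <stdio.h>

#define PAGES 8

/* Toy bootmem map for node 0: true = page not available. */
static bool node0_busy[PAGES];

/* Invented stand-in for marking one reserved page on node 0. */
static void reserve(int page)
{
	node0_busy[page] = true;
}

/* Invented first-fit page allocator, a miniature bootmem alloc. */
static int alloc_page_node0(void)
{
	int i;

	for (i = 0; i < PAGES; i++) {
		if (!node0_busy[i]) {
			node0_busy[i] = true;
			return i;
		}
	}
	return -1;
}

int main(void)
{
	/* Page 0 of node 0 is firmware-reserved; marking it before
	 * any fallback allocation is the invariant the kernel
	 * comment asks for... */
	reserve(0);

	/* ...so a later node's data that falls back to node 0's
	 * bootmem cannot land on the reserved page. */
	printf("fallback allocation got page %d\n", alloc_page_node0());
	return 0;	/* prints 1; without reserve(0) it would be 0 */
}

With reserve(0) done first, the fallback allocation returns page 1; in the other order, the first-fit search would hand out the reserved page 0, which is exactly the kind of corruption the ordering rule prevents.
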
@@ -922,53 +989,14 @@ void __init do_init_bootmem(void)
 			start_pfn, end_pfn);
 
 		free_bootmem_with_active_regions(nid, end_pfn);
-	}
-
-	/* Mark reserved regions */
-	for (i = 0; i < lmb.reserved.cnt; i++) {
-		unsigned long physbase = lmb.reserved.region[i].base;
-		unsigned long size = lmb.reserved.region[i].size;
-		unsigned long start_pfn = physbase >> PAGE_SHIFT;
-		unsigned long end_pfn = ((physbase + size) >> PAGE_SHIFT);
-		struct node_active_region node_ar;
-
-		get_node_active_region(start_pfn, &node_ar);
-		while (start_pfn < end_pfn &&
-			node_ar.start_pfn < node_ar.end_pfn) {
-			unsigned long reserve_size = size;
-			/*
-			 * if reserved region extends past active region
-			 * then trim size to active region
-			 */
-			if (end_pfn > node_ar.end_pfn)
-				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
-					- (start_pfn << PAGE_SHIFT);
-			dbg("reserve_bootmem %lx %lx nid=%d\n", physbase,
-				reserve_size, node_ar.nid);
-			reserve_bootmem_node(NODE_DATA(node_ar.nid), physbase,
-				reserve_size, BOOTMEM_DEFAULT);
-			/*
-			 * if reserved region is contained in the active region
-			 * then done.
-			 */
-			if (end_pfn <= node_ar.end_pfn)
-				break;
-
-			/*
-			 * reserved region extends past the active region
-			 * get next active region that contains this
-			 * reserved region
-			 */
-			start_pfn = node_ar.end_pfn;
-			physbase = start_pfn << PAGE_SHIFT;
-			size = size - reserve_size;
-			get_node_active_region(start_pfn, &node_ar);
-		}
-
-	}
-
-	for_each_online_node(nid)
+		/*
+		 * Be very careful about moving this around.  Future
+		 * calls to careful_allocation() depend on this getting
+		 * done correctly.
+		 */
+		mark_reserved_regions_for_nid(nid);
 		sparse_memory_present_with_active_regions(nid);
+	}
 }
 
 void __init paging_init(void)
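
Both the new function and the deleted loop convert byte addresses to page frame numbers with plain shifts, and end_pfn is an exclusive bound produced by truncation. A quick numeric check of that arithmetic, assuming 4K pages (PAGE_SHIFT = 12):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4K pages */

int main(void)
{
	unsigned long physbase = 0x3000, size = 0x2800;
	unsigned long start_pfn = physbase >> PAGE_SHIFT;		/* 3 */
	unsigned long end_pfn = (physbase + size) >> PAGE_SHIFT;	/* 5 */

	/* 0x3000 + 0x2800 = 0x5800, and 0x5800 >> 12 truncates to 5,
	 * so the exclusive pfn range stops at the page boundary below
	 * the end address. */
	printf("bytes [%#lx, %#lx) -> pfns [%lu, %lu)\n",
	       physbase, physbase + size, start_pfn, end_pfn);
	return 0;
}
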
