Skip to content

Commit

Permalink
memory_hotplug: handle empty zone when online_movable/online_kernel
Browse files Browse the repository at this point in the history
Make online_movable/online_kernel able to empty a zone, or to move memory
into an empty zone.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Wen Congyang <wency@cn.fujitsu.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Greg KH <greg@kroah.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
  • Loading branch information
Lai Jiangshan authored and Linus Torvalds committed Dec 12, 2012
1 parent 511c2ab commit e455a9b
Showing 1 changed file with 45 additions and 6 deletions.
51 changes: 45 additions & 6 deletions mm/memory_hotplug.c
Original file line number Diff line number Diff line change
Expand Up @@ -219,8 +219,17 @@ static void resize_zone(struct zone *zone, unsigned long start_pfn,
{
zone_span_writelock(zone);

zone->zone_start_pfn = start_pfn;
zone->spanned_pages = end_pfn - start_pfn;
if (end_pfn - start_pfn) {
zone->zone_start_pfn = start_pfn;
zone->spanned_pages = end_pfn - start_pfn;
} else {
/*
* keep it consistent with free_area_init_core():
* if spanned_pages == 0, then keep start_pfn == 0
*/
zone->zone_start_pfn = 0;
zone->spanned_pages = 0;
}

zone_span_writeunlock(zone);
}
Expand All @@ -236,10 +245,19 @@ static void fix_zone_id(struct zone *zone, unsigned long start_pfn,
set_page_links(pfn_to_page(pfn), zid, nid, pfn);
}

static int move_pfn_range_left(struct zone *z1, struct zone *z2,
static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
unsigned long start_pfn, unsigned long end_pfn)
{
int ret;
unsigned long flags;
unsigned long z1_start_pfn;

if (!z1->wait_table) {
ret = init_currently_empty_zone(z1, start_pfn,
end_pfn - start_pfn, MEMMAP_HOTPLUG);
if (ret)
return ret;
}

pgdat_resize_lock(z1->zone_pgdat, &flags);

Expand All @@ -253,7 +271,13 @@ static int move_pfn_range_left(struct zone *z1, struct zone *z2,
if (end_pfn <= z2->zone_start_pfn)
goto out_fail;

resize_zone(z1, z1->zone_start_pfn, end_pfn);
/* use start_pfn for z1's start_pfn if z1 is empty */
if (z1->spanned_pages)
z1_start_pfn = z1->zone_start_pfn;
else
z1_start_pfn = start_pfn;

resize_zone(z1, z1_start_pfn, end_pfn);
resize_zone(z2, end_pfn, z2->zone_start_pfn + z2->spanned_pages);

pgdat_resize_unlock(z1->zone_pgdat, &flags);
Expand All @@ -266,10 +290,19 @@ static int move_pfn_range_left(struct zone *z1, struct zone *z2,
return -1;
}

static int move_pfn_range_right(struct zone *z1, struct zone *z2,
static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
unsigned long start_pfn, unsigned long end_pfn)
{
int ret;
unsigned long flags;
unsigned long z2_end_pfn;

if (!z2->wait_table) {
ret = init_currently_empty_zone(z2, start_pfn,
end_pfn - start_pfn, MEMMAP_HOTPLUG);
if (ret)
return ret;
}

pgdat_resize_lock(z1->zone_pgdat, &flags);

Expand All @@ -283,8 +316,14 @@ static int move_pfn_range_right(struct zone *z1, struct zone *z2,
if (start_pfn >= z1->zone_start_pfn + z1->spanned_pages)
goto out_fail;

/* use end_pfn for z2's end_pfn if z2 is empty */
if (z2->spanned_pages)
z2_end_pfn = z2->zone_start_pfn + z2->spanned_pages;
else
z2_end_pfn = end_pfn;

resize_zone(z1, z1->zone_start_pfn, start_pfn);
resize_zone(z2, start_pfn, z2->zone_start_pfn + z2->spanned_pages);
resize_zone(z2, start_pfn, z2_end_pfn);

pgdat_resize_unlock(z1->zone_pgdat, &flags);

Expand Down

0 comments on commit e455a9b

Please sign in to comment.