Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 322086
b: refs/heads/master
c: de74f1c
h: refs/heads/master
v: v3
  • Loading branch information
Mel Gorman authored and Linus Torvalds committed Aug 21, 2012
1 parent 81f5245 commit 419ba0c
Show file tree
Hide file tree
Showing 2 changed files with 29 additions and 27 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 9a9a9a7adafe62a34de8b4fb48936c1c5f9bafa5
refs/heads/master: de74f1cc3b1e9730d9b58580cd11361d30cd182d
54 changes: 28 additions & 26 deletions trunk/mm/compaction.c
Original file line number Diff line number Diff line change
Expand Up @@ -383,6 +383,20 @@ static bool suitable_migration_target(struct page *page)
return false;
}

/*
 * Return the pfn where the free-page scanner begins for a full compaction
 * of @zone: the start of the zone's last pageblock. The free scanner walks
 * backwards from the end of the zone, while isolate_freepages_block scans
 * forward within each pageblock.
 */
static unsigned long start_free_pfn(struct zone *zone)
{
	/* End pfn of the zone, rounded down to a pageblock boundary. */
	return (zone->zone_start_pfn + zone->spanned_pages) &
	       ~(pageblock_nr_pages - 1);
}

/*
* Based on information in the current compact_control, find blocks
* suitable for isolating free pages from and then isolate them.
Expand Down Expand Up @@ -422,17 +436,6 @@ static void isolate_freepages(struct zone *zone,
pfn -= pageblock_nr_pages) {
unsigned long isolated;

/*
* Skip ahead if another thread is compacting in the area
* simultaneously. If we wrapped around, we can only skip
* ahead if zone->compact_cached_free_pfn also wrapped to
* above our starting point.
*/
if (cc->order > 0 && (!cc->wrapped ||
zone->compact_cached_free_pfn >
cc->start_free_pfn))
pfn = min(pfn, zone->compact_cached_free_pfn);

if (!pfn_valid(pfn))
continue;

Expand Down Expand Up @@ -474,7 +477,15 @@ static void isolate_freepages(struct zone *zone,
*/
if (isolated) {
high_pfn = max(high_pfn, pfn);
if (cc->order > 0)

/*
* If the free scanner has wrapped, update
* compact_cached_free_pfn to point to the highest
* pageblock with free pages. This reduces excessive
* scanning of full pageblocks near the end of the
* zone
*/
if (cc->order > 0 && cc->wrapped)
zone->compact_cached_free_pfn = high_pfn;
}
}
Expand All @@ -484,6 +495,11 @@ static void isolate_freepages(struct zone *zone,

cc->free_pfn = high_pfn;
cc->nr_freepages = nr_freepages;

/* If compact_cached_free_pfn is reset then set it now */
if (cc->order > 0 && !cc->wrapped &&
zone->compact_cached_free_pfn == start_free_pfn(zone))
zone->compact_cached_free_pfn = high_pfn;
}

/*
Expand Down Expand Up @@ -570,20 +586,6 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
return ISOLATE_SUCCESS;
}

/*
 * Returns the start pfn of the last page block in a zone. This is the starting
 * point for full compaction of a zone. Compaction searches for free pages from
 * the end of each zone, while isolate_freepages_block scans forward inside each
 * page block.
 */
static unsigned long start_free_pfn(struct zone *zone)
{
	unsigned long free_pfn;
	/* One past the last pfn spanned by the zone. */
	free_pfn = zone->zone_start_pfn + zone->spanned_pages;
	/*
	 * Round down to a pageblock boundary; the mask idiom assumes
	 * pageblock_nr_pages is a power of two — holds for kernel pageblocks.
	 */
	free_pfn &= ~(pageblock_nr_pages-1);
	return free_pfn;
}

static int compact_finished(struct zone *zone,
struct compact_control *cc)
{
Expand Down

0 comments on commit 419ba0c

Please sign in to comment.