
Commit 56cd3ce

---
r: 320798
b: refs/heads/master
c: 7db8889
h: refs/heads/master
v: v3
Rik van Riel authored and Linus Torvalds committed Aug 1, 2012
1 parent 2d9d2f6 commit 56cd3ce
Showing 5 changed files with 74 additions and 6 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: ab2158848775c7918288f2c423d3e4dbbc7d34eb
+refs/heads/master: 7db8889ab05b57200158432755af318fb68854a2
4 changes: 4 additions & 0 deletions trunk/include/linux/mmzone.h
@@ -368,6 +368,10 @@ struct zone {
 	 */
 	spinlock_t		lock;
 	int			all_unreclaimable; /* All pages pinned */
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+	/* pfn where the last incremental compaction isolated free pages */
+	unsigned long		compact_cached_free_pfn;
+#endif
 #ifdef CONFIG_MEMORY_HOTPLUG
 	/* see spanned/present_pages for more description */
 	seqlock_t		span_seqlock;
63 changes: 58 additions & 5 deletions trunk/mm/compaction.c
@@ -422,6 +422,17 @@ static void isolate_freepages(struct zone *zone,
 					pfn -= pageblock_nr_pages) {
 		unsigned long isolated;
 
+		/*
+		 * Skip ahead if another thread is compacting in the area
+		 * simultaneously. If we wrapped around, we can only skip
+		 * ahead if zone->compact_cached_free_pfn also wrapped to
+		 * above our starting point.
+		 */
+		if (cc->order > 0 && (!cc->wrapped ||
+				      zone->compact_cached_free_pfn >
+				      cc->start_free_pfn))
+			pfn = min(pfn, zone->compact_cached_free_pfn);
+
 		if (!pfn_valid(pfn))
 			continue;
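The skip-ahead test above can be checked in isolation. Below is a minimal userspace sketch; next_scan_pfn() and all of the pfn values are hypothetical stand-ins for the cc/zone fields, not part of the patch:

#include <stdbool.h>
#include <stdio.h>

/*
 * Mirrors the condition above: jump ahead to the zone-wide cached pfn
 * unless this scan has already wrapped and the cache has not yet
 * wrapped past our starting point.
 */
static unsigned long next_scan_pfn(int order, bool wrapped,
				   unsigned long pfn,
				   unsigned long cached_pfn,
				   unsigned long start_pfn)
{
	if (order > 0 && (!wrapped || cached_pfn > start_pfn))
		return pfn < cached_pfn ? pfn : cached_pfn;	/* min() */
	return pfn;
}

int main(void)
{
	/* Not yet wrapped: skip from 0x4000 down to the cached 0x2000. */
	printf("%lx\n", next_scan_pfn(2, false, 0x4000, 0x2000, 0x3000));
	/* Wrapped, cache still below our start of 0x3000: stay at 0x4000. */
	printf("%lx\n", next_scan_pfn(2, true, 0x4000, 0x2000, 0x3000));
	return 0;
}

The wrapped guard keeps a scanner that has gone back to the top of the zone from skipping forward past pages it has not rescanned yet.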

@@ -461,8 +472,11 @@ static void isolate_freepages(struct zone *zone,
 		 * looking for free pages, the search will restart here as
 		 * page migration may have returned some pages to the allocator
 		 */
-		if (isolated)
+		if (isolated) {
 			high_pfn = max(high_pfn, pfn);
+			if (cc->order > 0)
+				zone->compact_cached_free_pfn = high_pfn;
+		}
 	}
 
 	/* split_free_page does not map the pages */
@@ -556,6 +570,20 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 	return ISOLATE_SUCCESS;
 }
 
+/*
+ * Returns the start pfn of the last page block in a zone. This is the starting
+ * point for full compaction of a zone. Compaction searches for free pages from
+ * the end of each zone, while isolate_freepages_block scans forward inside each
+ * page block.
+ */
+static unsigned long start_free_pfn(struct zone *zone)
+{
+	unsigned long free_pfn;
+	free_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	free_pfn &= ~(pageblock_nr_pages-1);
+	return free_pfn;
+}
+
 static int compact_finished(struct zone *zone,
 			    struct compact_control *cc)
 {
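The mask in start_free_pfn() relies on pageblock_nr_pages being a power of two: rounding down leaves the scanner at the first pfn of the zone's last pageblock. A standalone sketch of the arithmetic, assuming 512-page (order-9) pageblocks and an invented zone layout:

#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL	/* assumed order-9 pageblocks */

int main(void)
{
	/* hypothetical zone: starts at pfn 0x1000, spans 1000000 pages */
	unsigned long zone_start_pfn = 0x1000;
	unsigned long spanned_pages = 1000000;

	unsigned long free_pfn = zone_start_pfn + spanned_pages;
	free_pfn &= ~(PAGEBLOCK_NR_PAGES - 1);	/* round down to a block */

	/* end pfn 0xf5240 rounds down to block start 0xf5200 */
	printf("zone end %lx -> free scanner start %lx\n",
	       zone_start_pfn + spanned_pages, free_pfn);
	return 0;
}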
@@ -565,8 +593,26 @@ static int compact_finished(struct zone *zone,
 	if (fatal_signal_pending(current))
 		return COMPACT_PARTIAL;
 
-	/* Compaction run completes if the migrate and free scanner meet */
-	if (cc->free_pfn <= cc->migrate_pfn)
+	/*
+	 * A full (order == -1) compaction run starts at the beginning and
+	 * end of a zone; it completes when the migrate and free scanner meet.
+	 * A partial (order > 0) compaction can start with the free scanner
+	 * at a random point in the zone, and may have to restart.
+	 */
+	if (cc->free_pfn <= cc->migrate_pfn) {
+		if (cc->order > 0 && !cc->wrapped) {
+			/* We started partway through; restart at the end. */
+			unsigned long free_pfn = start_free_pfn(zone);
+			zone->compact_cached_free_pfn = free_pfn;
+			cc->free_pfn = free_pfn;
+			cc->wrapped = 1;
+			return COMPACT_CONTINUE;
+		}
+		return COMPACT_COMPLETE;
+	}
+
+	/* We wrapped around and ended up where we started. */
+	if (cc->wrapped && cc->free_pfn <= cc->start_free_pfn)
 		return COMPACT_COMPLETE;
 
 	/*
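The exit paths of the new compact_finished() logic can be traced with a compilable toy model; struct cc, finished() and zone_end_block here are illustrative stand-ins rather than kernel code:

#include <stdbool.h>
#include <stdio.h>

enum result { CONTINUE, COMPLETE };

struct cc {
	int order;
	bool wrapped;
	unsigned long free_pfn, migrate_pfn, start_free_pfn;
};

/* zone_end_block plays the role of start_free_pfn(zone). */
static enum result finished(struct cc *cc, unsigned long zone_end_block)
{
	if (cc->free_pfn <= cc->migrate_pfn) {
		if (cc->order > 0 && !cc->wrapped) {
			/* scanners met for the first time: wrap to the top */
			cc->free_pfn = zone_end_block;
			cc->wrapped = true;
			return CONTINUE;
		}
		return COMPLETE;
	}
	/* wrapped around and came back to the original starting point */
	if (cc->wrapped && cc->free_pfn <= cc->start_free_pfn)
		return COMPLETE;
	return CONTINUE;
}

int main(void)
{
	struct cc cc = { .order = 2, .free_pfn = 0x800,
			 .migrate_pfn = 0x900, .start_free_pfn = 0x800 };

	/* scanners met mid-zone: prints 0 (CONTINUE) and wraps free_pfn */
	printf("%d\n", finished(&cc, 0x2000));

	cc.free_pfn = 0x700;	/* free scanner passed its starting point */
	cc.migrate_pfn = 0x100;
	/* prints 1 (COMPLETE) via the wrap-around check */
	printf("%d\n", finished(&cc, 0x2000));
	return 0;
}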
@@ -664,8 +710,15 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
 	/* Setup to move all movable pages to the end of the zone */
 	cc->migrate_pfn = zone->zone_start_pfn;
-	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
-	cc->free_pfn &= ~(pageblock_nr_pages-1);
+
+	if (cc->order > 0) {
+		/* Incremental compaction. Start where the last one stopped. */
+		cc->free_pfn = zone->compact_cached_free_pfn;
+		cc->start_free_pfn = cc->free_pfn;
+	} else {
+		/* Order == -1 starts at the end of the zone. */
+		cc->free_pfn = start_free_pfn(zone);
+	}
 
 	migrate_prep_local();
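To see the effect of the start-point selection, a short hedged sketch of the two cases; setup_free_scanner() and the cached pfn value are invented for illustration:

#include <stdio.h>

/* toy zone-wide cache, as left behind by a previous partial run */
static unsigned long compact_cached_free_pfn = 0x5200;

struct cc { int order; unsigned long free_pfn, start_free_pfn; };

/* mirrors the setup above: order > 0 resumes, order == -1 rescans fully */
static void setup_free_scanner(struct cc *cc, unsigned long zone_end_block)
{
	if (cc->order > 0) {
		cc->free_pfn = compact_cached_free_pfn;
		cc->start_free_pfn = cc->free_pfn;
	} else {
		cc->free_pfn = zone_end_block;
	}
}

int main(void)
{
	struct cc partial = { .order = 3 }, full = { .order = -1 };

	setup_free_scanner(&partial, 0xf5200);
	setup_free_scanner(&full, 0xf5200);
	/* partial resumes at 0x5200, full restarts at 0xf5200 */
	printf("partial %lx, full %lx\n", partial.free_pfn, full.free_pfn);
	return 0;
}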

6 changes: 6 additions & 0 deletions trunk/mm/internal.h
@@ -118,8 +118,14 @@ struct compact_control {
 	unsigned long nr_freepages;	/* Number of isolated free pages */
 	unsigned long nr_migratepages;	/* Number of pages to migrate */
 	unsigned long free_pfn;		/* isolate_freepages search base */
+	unsigned long start_free_pfn;	/* where we started the search */
 	unsigned long migrate_pfn;	/* isolate_migratepages search base */
 	bool sync;			/* Synchronous migration */
+	bool wrapped;			/* Order > 0 compactions are
+					   incremental, once free_pfn
+					   and migrate_pfn meet, we restart
+					   from the top of the zone;
+					   remember we wrapped around. */
 
 	int order;			/* order a direct compactor needs */
 	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
5 changes: 5 additions & 0 deletions trunk/mm/page_alloc.c
@@ -4397,6 +4397,11 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 
 		zone->spanned_pages = size;
 		zone->present_pages = realsize;
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+		zone->compact_cached_free_pfn = zone->zone_start_pfn +
+						zone->spanned_pages;
+		zone->compact_cached_free_pfn &= ~(pageblock_nr_pages-1);
+#endif
 #ifdef CONFIG_NUMA
 		zone->node = nid;
 		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
