Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 308865
b: refs/heads/master
c: 5ceb9ce
h: refs/heads/master
i:
  308863: 60a4fa2
v: v3
  • Loading branch information
Bartlomiej Zolnierkiewicz authored and Linus Torvalds committed May 29, 2012
1 parent fb936c1 commit 3c9ef42
Show file tree
Hide file tree
Showing 5 changed files with 151 additions and 29 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 238305bb4d418c95977162ba13c11880685fc731
refs/heads/master: 5ceb9ce6fe9462a298bb2cd5c9f1ca6cb80a0199
19 changes: 19 additions & 0 deletions trunk/include/linux/compaction.h
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

#include <linux/node.h>

/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
#define COMPACT_SKIPPED 0
Expand All @@ -11,6 +13,23 @@
/* The full zone was compacted */
#define COMPACT_COMPLETE 3

/*
 * Compaction supports three modes:
 *
 * COMPACT_ASYNC_MOVABLE uses asynchronous migration and only scans
 *    MIGRATE_MOVABLE pageblocks as migration sources and targets.
 * COMPACT_ASYNC_UNMOVABLE uses asynchronous migration and only scans
 *    MIGRATE_MOVABLE pageblocks as migration sources.
 *    MIGRATE_UNMOVABLE pageblocks are scanned as potential migration
 *    targets and converted to MIGRATE_MOVABLE if possible.
 * COMPACT_SYNC uses synchronous migration and scans all pageblocks.
 */
enum compact_mode {
COMPACT_ASYNC_MOVABLE,
COMPACT_ASYNC_UNMOVABLE,
COMPACT_SYNC,
};

#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
Expand Down
142 changes: 119 additions & 23 deletions trunk/mm/compaction.c
Original file line number Diff line number Diff line change
Expand Up @@ -235,7 +235,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
*/
while (unlikely(too_many_isolated(zone))) {
/* async migration should just abort */
if (!cc->sync)
if (cc->mode != COMPACT_SYNC)
return 0;

congestion_wait(BLK_RW_ASYNC, HZ/10);
Expand Down Expand Up @@ -303,7 +303,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
* satisfies the allocation
*/
pageblock_nr = low_pfn >> pageblock_order;
if (!cc->sync && last_pageblock_nr != pageblock_nr &&
if (cc->mode != COMPACT_SYNC &&
last_pageblock_nr != pageblock_nr &&
!migrate_async_suitable(get_pageblock_migratetype(page))) {
low_pfn += pageblock_nr_pages;
low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
Expand All @@ -324,7 +325,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
continue;
}

if (!cc->sync)
if (cc->mode != COMPACT_SYNC)
mode |= ISOLATE_ASYNC_MIGRATE;

/* Try isolate the page */
Expand Down Expand Up @@ -357,27 +358,90 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
/*
 * Try to convert a MIGRATE_UNMOVABLE pageblock to MIGRATE_MOVABLE.
 *
 * The pageblock containing @page is scanned; conversion is only done
 * when every valid page in it is free (buddy) or on the LRU, i.e. the
 * block holds no truly unmovable pages.
 *
 * Returns true if the pageblock was successfully converted to
 * MIGRATE_MOVABLE type, false otherwise.
 */
static bool rescue_unmovable_pageblock(struct page *page)
{
	unsigned long pfn, start_pfn, end_pfn;
	struct page *start_page, *end_page;

	pfn = page_to_pfn(page);
	start_pfn = pfn & ~(pageblock_nr_pages - 1);
	end_pfn = start_pfn + pageblock_nr_pages;

	start_page = pfn_to_page(start_pfn);
	end_page = pfn_to_page(end_pfn);

	/* Do not deal with pageblocks that overlap zones */
	if (page_zone(start_page) != page_zone(end_page))
		return false;

	for (page = start_page, pfn = start_pfn; page < end_page; pfn++,
								  page++) {
		if (!pfn_valid_within(pfn))
			continue;

		if (PageBuddy(page)) {
			int order = page_order(page);

			/* Skip the whole free chunk in one step */
			pfn += (1 << order) - 1;
			page += (1 << order) - 1;

			continue;
		} else if (page_count(page) == 0 || PageLRU(page))
			continue;

		/* An unmovable, in-use page: give up on this block */
		return false;
	}

	/*
	 * BUGFIX: after the loop above, 'page' points at (or past)
	 * end_page, i.e. into the NEXT pageblock.  Converting through
	 * 'page' would change the migratetype and free lists of the
	 * wrong block.  Use start_page, which is within the pageblock
	 * that was actually scanned.
	 */
	set_pageblock_migratetype(start_page, MIGRATE_MOVABLE);
	move_freepages_block(page_zone(start_page), start_page,
			     MIGRATE_MOVABLE);
	return true;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
enum smt_result {
GOOD_AS_MIGRATION_TARGET,
FAIL_UNMOVABLE_TARGET,
FAIL_BAD_TARGET,
};

/*
* Returns GOOD_AS_MIGRATION_TARGET if the page is within a block
* suitable for migration to, FAIL_UNMOVABLE_TARGET if the page
* is within a MIGRATE_UNMOVABLE block, FAIL_BAD_TARGET otherwise.
*/
static enum smt_result suitable_migration_target(struct page *page,
struct compact_control *cc)
{

int migratetype = get_pageblock_migratetype(page);

/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
return false;
return FAIL_BAD_TARGET;

/* If the page is a large free page, then allow migration */
if (PageBuddy(page) && page_order(page) >= pageblock_order)
return true;
return GOOD_AS_MIGRATION_TARGET;

/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
if (migrate_async_suitable(migratetype))
return true;
if (cc->mode != COMPACT_ASYNC_UNMOVABLE &&
migrate_async_suitable(migratetype))
return GOOD_AS_MIGRATION_TARGET;

if (cc->mode == COMPACT_ASYNC_MOVABLE &&
migratetype == MIGRATE_UNMOVABLE)
return FAIL_UNMOVABLE_TARGET;

if (cc->mode != COMPACT_ASYNC_MOVABLE &&
migratetype == MIGRATE_UNMOVABLE &&
rescue_unmovable_pageblock(page))
return GOOD_AS_MIGRATION_TARGET;

/* Otherwise skip the block */
return false;
return FAIL_BAD_TARGET;
}

/*
Expand Down Expand Up @@ -410,6 +474,13 @@ static void isolate_freepages(struct zone *zone,

zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;

/*
* isolate_freepages() may be called more than once during
* compact_zone_order() run and we want only the most recent
* count.
*/
cc->nr_pageblocks_skipped = 0;

/*
* Isolate free pages until enough are available to migrate the
* pages on cc->migratepages. We stop searching if the migrate
Expand All @@ -418,6 +489,7 @@ static void isolate_freepages(struct zone *zone,
for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
pfn -= pageblock_nr_pages) {
unsigned long isolated;
enum smt_result ret;

if (!pfn_valid(pfn))
continue;
Expand All @@ -434,9 +506,12 @@ static void isolate_freepages(struct zone *zone,
continue;

/* Check the block is suitable for migration */
if (!suitable_migration_target(page))
ret = suitable_migration_target(page, cc);
if (ret != GOOD_AS_MIGRATION_TARGET) {
if (ret == FAIL_UNMOVABLE_TARGET)
cc->nr_pageblocks_skipped++;
continue;

}
/*
* Found a block suitable for isolating free pages from. Now
* we disabled interrupts, double check things are ok and
Expand All @@ -445,12 +520,14 @@ static void isolate_freepages(struct zone *zone,
*/
isolated = 0;
spin_lock_irqsave(&zone->lock, flags);
if (suitable_migration_target(page)) {
ret = suitable_migration_target(page, cc);
if (ret == GOOD_AS_MIGRATION_TARGET) {
end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
isolated = isolate_freepages_block(pfn, end_pfn,
freelist, false);
nr_freepages += isolated;
}
} else if (ret == FAIL_UNMOVABLE_TARGET)
cc->nr_pageblocks_skipped++;
spin_unlock_irqrestore(&zone->lock, flags);

/*
Expand Down Expand Up @@ -682,8 +759,9 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)

nr_migrate = cc->nr_migratepages;
err = migrate_pages(&cc->migratepages, compaction_alloc,
(unsigned long)cc, false,
cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
(unsigned long)&cc->freepages, false,
(cc->mode == COMPACT_SYNC) ? MIGRATE_SYNC_LIGHT
: MIGRATE_ASYNC);
update_nr_listpages(cc);
nr_remaining = cc->nr_migratepages;

Expand Down Expand Up @@ -712,20 +790,26 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)

/*
 * Run compaction on one zone for a direct compactor.
 *
 * @zone: zone to compact
 * @order: allocation order the caller needs
 * @gfp_mask: gfp mask of the allocation (determines migratetype)
 * @mode: compaction mode (async movable/unmovable or sync)
 * @nr_pageblocks_skipped: out-param; number of MIGRATE_UNMOVABLE
 *	destination pageblocks skipped during the free-page scan,
 *	so the caller can decide whether to retry in UNMOVABLE mode.
 *
 * Returns the compact_zone() result (COMPACT_* value).
 */
static unsigned long compact_zone_order(struct zone *zone,
				 int order, gfp_t gfp_mask,
				 enum compact_mode mode,
				 unsigned long *nr_pageblocks_skipped)
{
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.mode = mode,
	};
	unsigned long rc;

	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	rc = compact_zone(zone, &cc);
	/* Report the skip count gathered by isolate_freepages() */
	*nr_pageblocks_skipped = cc.nr_pageblocks_skipped;

	return rc;
}

int sysctl_extfrag_threshold = 500;
Expand All @@ -750,6 +834,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
struct zoneref *z;
struct zone *zone;
int rc = COMPACT_SKIPPED;
unsigned long nr_pageblocks_skipped;
enum compact_mode mode;

/*
* Check whether it is worth even starting compaction. The order check is
Expand All @@ -766,12 +852,22 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
nodemask) {
int status;

status = compact_zone_order(zone, order, gfp_mask, sync);
mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE;
retry:
status = compact_zone_order(zone, order, gfp_mask, mode,
&nr_pageblocks_skipped);
rc = max(status, rc);

/* If a normal allocation would succeed, stop compacting */
if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
break;

if (rc == COMPACT_COMPLETE && mode == COMPACT_ASYNC_MOVABLE) {
if (nr_pageblocks_skipped) {
mode = COMPACT_ASYNC_UNMOVABLE;
goto retry;
}
}
}

return rc;
Expand Down Expand Up @@ -805,7 +901,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
if (ok && cc->order > zone->compact_order_failed)
zone->compact_order_failed = cc->order + 1;
/* Currently async compaction is never deferred. */
else if (!ok && cc->sync)
else if (!ok && cc->mode == COMPACT_SYNC)
defer_compaction(zone, cc->order);
}

Expand All @@ -820,7 +916,7 @@ int compact_pgdat(pg_data_t *pgdat, int order)
{
struct compact_control cc = {
.order = order,
.sync = false,
.mode = COMPACT_ASYNC_MOVABLE,
};

return __compact_pgdat(pgdat, &cc);
Expand All @@ -830,7 +926,7 @@ static int compact_node(int nid)
{
struct compact_control cc = {
.order = -1,
.sync = true,
.mode = COMPACT_SYNC,
};

return __compact_pgdat(NODE_DATA(nid), &cc);
Expand Down
9 changes: 8 additions & 1 deletion trunk/mm/internal.h
Original file line number Diff line number Diff line change
Expand Up @@ -94,13 +94,17 @@ extern void putback_lru_page(struct page *page);
/*
* in mm/page_alloc.c
*/
extern void set_pageblock_migratetype(struct page *page, int migratetype);
extern int move_freepages_block(struct zone *zone, struct page *page,
int migratetype);
extern void __free_pages_bootmem(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned long order);
#ifdef CONFIG_MEMORY_FAILURE
extern bool is_free_buddy_page(struct page *page);
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
#include <linux/compaction.h>

/*
* in mm/compaction.c
Expand All @@ -119,11 +123,14 @@ struct compact_control {
unsigned long nr_migratepages; /* Number of pages to migrate */
unsigned long free_pfn; /* isolate_freepages search base */
unsigned long migrate_pfn; /* isolate_migratepages search base */
bool sync; /* Synchronous migration */
enum compact_mode mode; /* Compaction mode */

int order; /* order a direct compactor needs */
int migratetype; /* MOVABLE, RECLAIMABLE etc */
struct zone *zone;

/* Number of UNMOVABLE destination pageblocks skipped during scan */
unsigned long nr_pageblocks_skipped;
};

unsigned long
Expand Down
8 changes: 4 additions & 4 deletions trunk/mm/page_alloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -219,7 +219,7 @@ EXPORT_SYMBOL(nr_online_nodes);

int page_group_by_mobility_disabled __read_mostly;

static void set_pageblock_migratetype(struct page *page, int migratetype)
void set_pageblock_migratetype(struct page *page, int migratetype)
{

if (unlikely(page_group_by_mobility_disabled))
Expand Down Expand Up @@ -954,8 +954,8 @@ static int move_freepages(struct zone *zone,
return pages_moved;
}

static int move_freepages_block(struct zone *zone, struct page *page,
int migratetype)
int move_freepages_block(struct zone *zone, struct page *page,
int migratetype)
{
unsigned long start_pfn, end_pfn;
struct page *start_page, *end_page;
Expand Down Expand Up @@ -5657,7 +5657,7 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
.nr_migratepages = 0,
.order = -1,
.zone = page_zone(pfn_to_page(start)),
.sync = true,
.mode = COMPACT_SYNC,
};
INIT_LIST_HEAD(&cc.migratepages);

Expand Down

0 comments on commit 3c9ef42

Please sign in to comment.