
Commit 466e51a

---
r: 310431
b: refs/heads/master
c: 68e3e92
h: refs/heads/master
i:
  310429: f7aaa64
  310427: 2dd539c
  310423: 6c05ea1
  310415: 6938da0
  310399: 7e050e6
v: v3
Linus Torvalds committed Jun 4, 2012
1 parent 9c1dc49 commit 466e51a
Showing 5 changed files with 29 additions and 151 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 752dc185dacba1edcba425e67fc6df3c7793a5c3
+refs/heads/master: 68e3e92620c323703bc7db75c2ba15239ee85c39
19 changes: 0 additions & 19 deletions trunk/include/linux/compaction.h
@@ -1,8 +1,6 @@
 #ifndef _LINUX_COMPACTION_H
 #define _LINUX_COMPACTION_H
 
-#include <linux/node.h>
-
 /* Return values for compact_zone() and try_to_compact_pages() */
 /* compaction didn't start as it was not possible or direct reclaim was more suitable */
 #define COMPACT_SKIPPED 0
@@ -13,23 +11,6 @@
 /* The full zone was compacted */
 #define COMPACT_COMPLETE 3
 
-/*
- * compaction supports three modes
- *
- * COMPACT_ASYNC_MOVABLE uses asynchronous migration and only scans
- *   MIGRATE_MOVABLE pageblocks as migration sources and targets.
- * COMPACT_ASYNC_UNMOVABLE uses asynchronous migration and only scans
- *   MIGRATE_MOVABLE pageblocks as migration sources.
- *   MIGRATE_UNMOVABLE pageblocks are scanned as potential migration
- *   targets and converts them to MIGRATE_MOVABLE if possible
- * COMPACT_SYNC uses synchronous migration and scans all pageblocks
- */
-enum compact_mode {
-        COMPACT_ASYNC_MOVABLE,
-        COMPACT_ASYNC_UNMOVABLE,
-        COMPACT_SYNC,
-};
-
 #ifdef CONFIG_COMPACTION
 extern int sysctl_compact_memory;
 extern int sysctl_compaction_handler(struct ctl_table *table, int write,
142 changes: 23 additions & 119 deletions trunk/mm/compaction.c
@@ -236,7 +236,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
         */
        while (unlikely(too_many_isolated(zone))) {
                /* async migration should just abort */
-               if (cc->mode != COMPACT_SYNC)
+               if (!cc->sync)
                        return 0;
 
                congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -304,8 +304,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                 * satisfies the allocation
                 */
                pageblock_nr = low_pfn >> pageblock_order;
-               if (cc->mode != COMPACT_SYNC &&
-                   last_pageblock_nr != pageblock_nr &&
+               if (!cc->sync && last_pageblock_nr != pageblock_nr &&
                    !migrate_async_suitable(get_pageblock_migratetype(page))) {
                        low_pfn += pageblock_nr_pages;
                        low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
@@ -326,7 +325,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                        continue;
                }
 
-               if (cc->mode != COMPACT_SYNC)
+               if (!cc->sync)
                        mode |= ISOLATE_ASYNC_MIGRATE;
 
                lruvec = mem_cgroup_page_lruvec(page, zone);
@@ -361,90 +360,27 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 
 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
 #ifdef CONFIG_COMPACTION
-/*
- * Returns true if MIGRATE_UNMOVABLE pageblock was successfully
- * converted to MIGRATE_MOVABLE type, false otherwise.
- */
-static bool rescue_unmovable_pageblock(struct page *page)
-{
-        unsigned long pfn, start_pfn, end_pfn;
-        struct page *start_page, *end_page;
-
-        pfn = page_to_pfn(page);
-        start_pfn = pfn & ~(pageblock_nr_pages - 1);
-        end_pfn = start_pfn + pageblock_nr_pages;
-
-        start_page = pfn_to_page(start_pfn);
-        end_page = pfn_to_page(end_pfn);
-
-        /* Do not deal with pageblocks that overlap zones */
-        if (page_zone(start_page) != page_zone(end_page))
-                return false;
-
-        for (page = start_page, pfn = start_pfn; page < end_page; pfn++,
-                                                                   page++) {
-                if (!pfn_valid_within(pfn))
-                        continue;
-
-                if (PageBuddy(page)) {
-                        int order = page_order(page);
-
-                        pfn += (1 << order) - 1;
-                        page += (1 << order) - 1;
-
-                        continue;
-                } else if (page_count(page) == 0 || PageLRU(page))
-                        continue;
-
-                return false;
-        }
-
-        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-        move_freepages_block(page_zone(page), page, MIGRATE_MOVABLE);
-        return true;
-}
-
-enum smt_result {
-        GOOD_AS_MIGRATION_TARGET,
-        FAIL_UNMOVABLE_TARGET,
-        FAIL_BAD_TARGET,
-};
-
-/*
- * Returns GOOD_AS_MIGRATION_TARGET if the page is within a block
- * suitable for migration to, FAIL_UNMOVABLE_TARGET if the page
- * is within a MIGRATE_UNMOVABLE block, FAIL_BAD_TARGET otherwise.
- */
-static enum smt_result suitable_migration_target(struct page *page,
-                                struct compact_control *cc)
+/* Returns true if the page is within a block suitable for migration to */
+static bool suitable_migration_target(struct page *page)
 {
-
        int migratetype = get_pageblock_migratetype(page);
 
        /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
        if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
-               return FAIL_BAD_TARGET;
+               return false;
 
        /* If the page is a large free page, then allow migration */
        if (PageBuddy(page) && page_order(page) >= pageblock_order)
-               return GOOD_AS_MIGRATION_TARGET;
+               return true;
 
        /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
-       if (cc->mode != COMPACT_ASYNC_UNMOVABLE &&
-           migrate_async_suitable(migratetype))
-               return GOOD_AS_MIGRATION_TARGET;
-
-       if (cc->mode == COMPACT_ASYNC_MOVABLE &&
-           migratetype == MIGRATE_UNMOVABLE)
-               return FAIL_UNMOVABLE_TARGET;
-
-       if (cc->mode != COMPACT_ASYNC_MOVABLE &&
-           migratetype == MIGRATE_UNMOVABLE &&
-           rescue_unmovable_pageblock(page))
-               return GOOD_AS_MIGRATION_TARGET;
+       if (migrate_async_suitable(migratetype))
+               return true;
 
        /* Otherwise skip the block */
-       return FAIL_BAD_TARGET;
+       return false;
 }
 
 /*
@@ -477,13 +413,6 @@ static void isolate_freepages(struct zone *zone,
 
        zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
 
-       /*
-        * isolate_freepages() may be called more than once during
-        * compact_zone_order() run and we want only the most recent
-        * count.
-        */
-       cc->nr_pageblocks_skipped = 0;
-
        /*
         * Isolate free pages until enough are available to migrate the
         * pages on cc->migratepages. We stop searching if the migrate
@@ -492,7 +421,6 @@ static void isolate_freepages(struct zone *zone,
        for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
                                        pfn -= pageblock_nr_pages) {
                unsigned long isolated;
-               enum smt_result ret;
 
                if (!pfn_valid(pfn))
                        continue;
@@ -509,12 +437,9 @@ static void isolate_freepages(struct zone *zone,
                        continue;
 
                /* Check the block is suitable for migration */
-               ret = suitable_migration_target(page, cc);
-               if (ret != GOOD_AS_MIGRATION_TARGET) {
-                       if (ret == FAIL_UNMOVABLE_TARGET)
-                               cc->nr_pageblocks_skipped++;
+               if (!suitable_migration_target(page))
                        continue;
-               }
 
                /*
                 * Found a block suitable for isolating free pages from. Now
                 * we disabled interrupts, double check things are ok and
@@ -523,14 +448,12 @@ static void isolate_freepages(struct zone *zone,
                 */
                isolated = 0;
                spin_lock_irqsave(&zone->lock, flags);
-               ret = suitable_migration_target(page, cc);
-               if (ret == GOOD_AS_MIGRATION_TARGET) {
+               if (suitable_migration_target(page)) {
                        end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
                        isolated = isolate_freepages_block(pfn, end_pfn,
                                                           freelist, false);
                        nr_freepages += isolated;
-               } else if (ret == FAIL_UNMOVABLE_TARGET)
-                       cc->nr_pageblocks_skipped++;
+               }
                spin_unlock_irqrestore(&zone->lock, flags);
 
                /*
@@ -762,9 +685,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
                nr_migrate = cc->nr_migratepages;
                err = migrate_pages(&cc->migratepages, compaction_alloc,
-                               (unsigned long)&cc->freepages, false,
-                               (cc->mode == COMPACT_SYNC) ? MIGRATE_SYNC_LIGHT
-                               : MIGRATE_ASYNC);
+                               (unsigned long)cc, false,
+                               cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
                update_nr_listpages(cc);
                nr_remaining = cc->nr_migratepages;
 
@@ -793,26 +715,20 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
 static unsigned long compact_zone_order(struct zone *zone,
                                         int order, gfp_t gfp_mask,
-                                        enum compact_mode mode,
-                                        unsigned long *nr_pageblocks_skipped)
+                                        bool sync)
 {
        struct compact_control cc = {
                .nr_freepages = 0,
                .nr_migratepages = 0,
                .order = order,
                .migratetype = allocflags_to_migratetype(gfp_mask),
                .zone = zone,
-               .mode = mode,
+               .sync = sync,
        };
-       unsigned long rc;
 
        INIT_LIST_HEAD(&cc.freepages);
        INIT_LIST_HEAD(&cc.migratepages);
 
-       rc = compact_zone(zone, &cc);
-       *nr_pageblocks_skipped = cc.nr_pageblocks_skipped;
-
-       return rc;
+       return compact_zone(zone, &cc);
 }
 
 int sysctl_extfrag_threshold = 500;
@@ -837,8 +753,6 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
        struct zoneref *z;
        struct zone *zone;
        int rc = COMPACT_SKIPPED;
-       unsigned long nr_pageblocks_skipped;
-       enum compact_mode mode;
 
        /*
         * Check whether it is worth even starting compaction. The order check is
@@ -855,22 +769,12 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
                                                                nodemask) {
                int status;
 
-               mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE;
-retry:
-               status = compact_zone_order(zone, order, gfp_mask, mode,
-                                           &nr_pageblocks_skipped);
+               status = compact_zone_order(zone, order, gfp_mask, sync);
                rc = max(status, rc);
 
                /* If a normal allocation would succeed, stop compacting */
                if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
                        break;
-
-               if (rc == COMPACT_COMPLETE && mode == COMPACT_ASYNC_MOVABLE) {
-                       if (nr_pageblocks_skipped) {
-                               mode = COMPACT_ASYNC_UNMOVABLE;
-                               goto retry;
-                       }
-               }
        }
 
        return rc;
@@ -904,7 +808,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
                if (ok && cc->order > zone->compact_order_failed)
                        zone->compact_order_failed = cc->order + 1;
                /* Currently async compaction is never deferred. */
-               else if (!ok && cc->mode == COMPACT_SYNC)
+               else if (!ok && cc->sync)
                        defer_compaction(zone, cc->order);
        }
 
@@ -919,7 +823,7 @@ int compact_pgdat(pg_data_t *pgdat, int order)
 {
        struct compact_control cc = {
                .order = order,
-               .mode = COMPACT_ASYNC_MOVABLE,
+               .sync = false,
        };
 
        return __compact_pgdat(pgdat, &cc);
@@ -929,7 +833,7 @@ static int compact_node(int nid)
 {
        struct compact_control cc = {
                .order = -1,
-               .mode = COMPACT_SYNC,
+               .sync = true,
        };
 
        return __compact_pgdat(NODE_DATA(nid), &cc);
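The net effect of this file's hunks is that the three-state enum compact_mode collapses back into a boolean: both async variants behaved identically at the migration layer, and only COMPACT_SYNC ever selected MIGRATE_SYNC_LIGHT. Below is a minimal, stand-alone C sketch of that equivalence; the enum values are user-space stand-ins for the kernel constants, for illustration only, and the two helpers mirror the removed and restored migrate_pages() call sites.

#include <assert.h>
#include <stdbool.h>

/* Stand-ins for the kernel's migrate-mode constants (illustrative only). */
enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT };

/* The three-state interface removed by this commit. */
enum compact_mode { COMPACT_ASYNC_MOVABLE, COMPACT_ASYNC_UNMOVABLE, COMPACT_SYNC };

/* Old mapping, as in the removed migrate_pages() call site. */
static enum migrate_mode old_mode(enum compact_mode mode)
{
        return (mode == COMPACT_SYNC) ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC;
}

/* Restored mapping: cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC. */
static enum migrate_mode new_mode(bool sync)
{
        return sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC;
}

int main(void)
{
        /* Both async variants behaved the same at the migration layer. */
        assert(old_mode(COMPACT_ASYNC_MOVABLE) == new_mode(false));
        assert(old_mode(COMPACT_ASYNC_UNMOVABLE) == new_mode(false));
        assert(old_mode(COMPACT_SYNC) == new_mode(true));
        return 0;
}

Compiled with any C99 compiler, all three asserts hold, which is why dropping the extra enum loses no information at this call site.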
9 changes: 1 addition & 8 deletions trunk/mm/internal.h
@@ -94,17 +94,13 @@ extern void putback_lru_page(struct page *page);
 /*
  * in mm/page_alloc.c
  */
-extern void set_pageblock_migratetype(struct page *page, int migratetype);
-extern int move_freepages_block(struct zone *zone, struct page *page,
-                                int migratetype);
 extern void __free_pages_bootmem(struct page *page, unsigned int order);
 extern void prep_compound_page(struct page *page, unsigned long order);
 #ifdef CONFIG_MEMORY_FAILURE
 extern bool is_free_buddy_page(struct page *page);
 #endif
 
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
-#include <linux/compaction.h>
 
 /*
  * in mm/compaction.c
@@ -123,14 +119,11 @@ struct compact_control {
        unsigned long nr_migratepages;  /* Number of pages to migrate */
        unsigned long free_pfn;         /* isolate_freepages search base */
        unsigned long migrate_pfn;      /* isolate_migratepages search base */
-       enum compact_mode mode;         /* Compaction mode */
+       bool sync;                      /* Synchronous migration */
 
        int order;                      /* order a direct compactor needs */
        int migratetype;                /* MOVABLE, RECLAIMABLE etc */
        struct zone *zone;
-
-       /* Number of UNMOVABLE destination pageblocks skipped during scan */
-       unsigned long nr_pageblocks_skipped;
 };
 
 unsigned long
8 changes: 4 additions & 4 deletions trunk/mm/page_alloc.c
@@ -219,7 +219,7 @@ EXPORT_SYMBOL(nr_online_nodes);
 
 int page_group_by_mobility_disabled __read_mostly;
 
-void set_pageblock_migratetype(struct page *page, int migratetype)
+static void set_pageblock_migratetype(struct page *page, int migratetype)
 {
        if (unlikely(page_group_by_mobility_disabled))
@@ -954,8 +954,8 @@ static int move_freepages(struct zone *zone,
        return pages_moved;
 }
 
-int move_freepages_block(struct zone *zone, struct page *page,
-                        int migratetype)
+static int move_freepages_block(struct zone *zone, struct page *page,
+                               int migratetype)
 {
        unsigned long start_pfn, end_pfn;
        struct page *start_page, *end_page;
@@ -5651,7 +5651,7 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
                .nr_migratepages = 0,
                .order = -1,
                .zone = page_zone(pfn_to_page(start)),
-               .mode = COMPACT_SYNC,
+               .sync = true,
        };
        INIT_LIST_HEAD(&cc.migratepages);
 
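With rescue_unmovable_pageblock() gone, nothing outside page_alloc.c calls set_pageblock_migratetype() or move_freepages_block() any more, so the two hunks above give both functions internal linkage again (and the internal.h hunk drops their extern declarations). A small stand-alone sketch of that linkage-narrowing pattern, using hypothetical names that are not from the kernel:

#include <stdio.h>

/* Before: `helper` had external linkage plus an extern declaration in a
 * header, because one function in another file called it.  Once that
 * caller is deleted, the declaration is dropped and the definition is
 * marked static, so the symbol no longer leaks out of this file. */
static int helper(int x)
{
        return x * 2;           /* stand-in for the real work */
}

int public_entry(int x)
{
        return helper(x) + 1;   /* the only remaining caller */
}

int main(void)
{
        printf("%d\n", public_entry(20));       /* prints 41 */
        return 0;
}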
