Commit

---
r: 332439
b: refs/heads/master
c: bb13ffe
h: refs/heads/master
i:
  332437: 42a9269
  332435: 398b79a
  332431: 53d2244
v: v3
Mel Gorman authored and Linus Torvalds committed Oct 9, 2012
1 parent 16414d5 commit 2625201
Showing 6 changed files with 152 additions and 39 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 753341a4b85ff337487b9959c71c529f522004f4
+refs/heads/master: bb13ffeb9f6bfeb301443994dfbf29f91117dfb3
3 changes: 3 additions & 0 deletions trunk/include/linux/mmzone.h
@@ -369,6 +369,9 @@ struct zone {
*/
spinlock_t lock;
int all_unreclaimable; /* All pages pinned */
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
unsigned long compact_blockskip_expire;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
/* see spanned/present_pages for more description */
seqlock_t span_seqlock;
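
The compact_blockskip_expire field added above is a jiffies timestamp that rate-limits how often the per-pageblock skip bits may be reset. A minimal sketch of the idiom, with blockskip_reset_allowed() as a hypothetical helper name rather than anything in this patch:

#include <linux/jiffies.h>
#include <linux/mmzone.h>

/* Sketch only: returns true when a bulk reset of the skip bits is due */
static bool blockskip_reset_allowed(struct zone *zone)
{
        /* time_before() compares jiffies values safely across wraparound */
        if (time_before(jiffies, zone->compact_blockskip_expire))
                return false;   /* a reset ran within the last five seconds */

        /* Arm the next window: no further resets for HZ * 5 jiffies */
        zone->compact_blockskip_expire = jiffies + (HZ * 5);
        return true;
}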
19 changes: 17 additions & 2 deletions trunk/include/linux/pageblock-flags.h
@@ -30,6 +30,9 @@ enum pageblock_bits {
PB_migrate,
PB_migrate_end = PB_migrate + 3 - 1,
/* 3 bits required for migrate types */
#ifdef CONFIG_COMPACTION
PB_migrate_skip,/* If set the block is skipped by compaction */
#endif /* CONFIG_COMPACTION */
NR_PAGEBLOCK_BITS
};

@@ -65,10 +68,22 @@ unsigned long get_pageblock_flags_group(struct page *page,
void set_pageblock_flags_group(struct page *page, unsigned long flags,
int start_bitidx, int end_bitidx);

#ifdef CONFIG_COMPACTION
#define get_pageblock_skip(page) \
get_pageblock_flags_group(page, PB_migrate_skip, \
PB_migrate_skip + 1)
#define clear_pageblock_skip(page) \
set_pageblock_flags_group(page, 0, PB_migrate_skip, \
PB_migrate_skip + 1)
#define set_pageblock_skip(page) \
set_pageblock_flags_group(page, 1, PB_migrate_skip, \
PB_migrate_skip + 1)
#endif /* CONFIG_COMPACTION */

#define get_pageblock_flags(page) \
-get_pageblock_flags_group(page, 0, NR_PAGEBLOCK_BITS-1)
+get_pageblock_flags_group(page, 0, PB_migrate_end)
#define set_pageblock_flags(page, flags) \
set_pageblock_flags_group(page, flags, \
-0, NR_PAGEBLOCK_BITS-1)
+0, PB_migrate_end)

#endif /* PAGEBLOCK_FLAGS_H */
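
With CONFIG_COMPACTION enabled, the new PB_migrate_skip bit sits directly above the three migratetype bits, which is why get_pageblock_flags() and set_pageblock_flags() now stop at PB_migrate_end instead of NR_PAGEBLOCK_BITS-1: the generic accessors must not read or clobber the skip hint. A userspace model of the layout (the packing shown is illustrative, not the kernel's bitmap implementation):

#include <assert.h>
#include <stdio.h>

/* Mirror of enum pageblock_bits after this patch, CONFIG_COMPACTION=y */
enum pageblock_bits {
        PB_migrate,
        PB_migrate_end = PB_migrate + 3 - 1,    /* bits 0-2: migratetype */
        PB_migrate_skip,                        /* bit 3: skip hint */
        NR_PAGEBLOCK_BITS
};

int main(void)
{
        unsigned long block = 0;

        block |= 5UL << PB_migrate;             /* store migratetype 5 */
        block |= 1UL << PB_migrate_skip;        /* mark the block "skip" */

        /* Masking only through PB_migrate_end keeps the hint out */
        unsigned long mt = block & ((1UL << (PB_migrate_end + 1)) - 1);
        assert(mt == 5);

        /* A NR_PAGEBLOCK_BITS-wide mask would leak bit 3 back in */
        unsigned long leaky = block & ((1UL << NR_PAGEBLOCK_BITS) - 1);
        printf("masked: %lu, leaky: %lu\n", mt, leaky); /* 5 vs 13 */
        return 0;
}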
125 changes: 108 additions & 17 deletions trunk/mm/compaction.c
@@ -50,6 +50,79 @@ static inline bool migrate_async_suitable(int migratetype)
return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

#ifdef CONFIG_COMPACTION
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
struct page *page)
{
if (cc->ignore_skip_hint)
return true;

return !get_pageblock_skip(page);
}

/*
* This function is called to clear all cached information on pageblocks that
* should be skipped for page isolation when the migrate and free page scanner
* meet.
*/
static void reset_isolation_suitable(struct zone *zone)
{
unsigned long start_pfn = zone->zone_start_pfn;
unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
unsigned long pfn;

/*
* Do not reset more than once every five seconds. If allocations are
* failing sufficiently quickly to allow this to happen then continually
* scanning for compaction is not going to help. The choice of five
* seconds is arbitrary but will mitigate excessive scanning.
*/
if (time_before(jiffies, zone->compact_blockskip_expire))
return;
zone->compact_blockskip_expire = jiffies + (HZ * 5);

/* Walk the zone and mark every pageblock as suitable for isolation */
for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
struct page *page;

cond_resched();

if (!pfn_valid(pfn))
continue;

page = pfn_to_page(pfn);
if (zone != page_zone(page))
continue;

clear_pageblock_skip(page);
}
}

/*
* If no pages were isolated then mark this pageblock to be skipped in the
* future. The information is later cleared by reset_isolation_suitable().
*/
static void update_pageblock_skip(struct page *page, unsigned long nr_isolated)
{
if (!page)
return;

if (!nr_isolated)
set_pageblock_skip(page);
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
struct page *page)
{
return true;
}

static void update_pageblock_skip(struct page *page, unsigned long nr_isolated)
{
}
#endif /* CONFIG_COMPACTION */

static inline bool should_release_lock(spinlock_t *lock)
{
return need_resched() || spin_is_contended(lock);
@@ -181,7 +254,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
bool strict)
{
int nr_scanned = 0, total_isolated = 0;
-struct page *cursor;
+struct page *cursor, *valid_page = NULL;
unsigned long nr_strict_required = end_pfn - blockpfn;
unsigned long flags;
bool locked = false;
@@ -196,6 +269,8 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
nr_scanned++;
if (!pfn_valid_within(blockpfn))
continue;
if (!valid_page)
valid_page = page;
if (!PageBuddy(page))
continue;

@@ -250,6 +325,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
if (locked)
spin_unlock_irqrestore(&cc->zone->lock, flags);

/* Update the pageblock-skip if the whole pageblock was scanned */
if (blockpfn == end_pfn)
update_pageblock_skip(valid_page, total_isolated);

return total_isolated;
}
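
The valid_page bookkeeping above follows a pattern both scanners now share: remember the first valid page seen so there is something to hang the skip bit on, and only mark the block if the scan ran to the end, since an early bail-out (for example on lock contention) says nothing about the block's contents. A condensed sketch, with try_isolate() as a placeholder for the real per-page work:

struct page *valid_page = NULL;
unsigned long pfn, nr_isolated = 0;

for (pfn = block_start_pfn; pfn < block_end_pfn; pfn++) {
        if (!pfn_valid_within(pfn))
                continue;
        if (!valid_page)
                valid_page = pfn_to_page(pfn);  /* any page in the block works */
        nr_isolated += try_isolate(pfn);        /* placeholder */
}

/* Partial scans must not blacklist the block */
if (pfn == block_end_pfn)
        update_pageblock_skip(valid_page, nr_isolated);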

@@ -267,22 +346,14 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
* a free page).
*/
unsigned long
-isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
+isolate_freepages_range(struct compact_control *cc,
+unsigned long start_pfn, unsigned long end_pfn)
{
unsigned long isolated, pfn, block_end_pfn;
-struct zone *zone = NULL;
LIST_HEAD(freelist);

-/* cc needed for isolate_freepages_block to acquire zone->lock */
-struct compact_control cc = {
-.sync = true,
-};
-
-if (pfn_valid(start_pfn))
-cc.zone = zone = page_zone(pfn_to_page(start_pfn));
-
for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
-if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
+if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
break;

/*
@@ -292,7 +363,7 @@ isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
block_end_pfn = min(block_end_pfn, end_pfn);

-isolated = isolate_freepages_block(&cc, pfn, block_end_pfn,
+isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
&freelist, true);

/*
@@ -387,6 +458,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
struct lruvec *lruvec;
unsigned long flags;
bool locked = false;
struct page *page = NULL, *valid_page = NULL;

/*
* Ensure that there are not too many pages isolated from the LRU
@@ -407,8 +479,6 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
/* Time to isolate some pages for migration */
cond_resched();
for (; low_pfn < end_pfn; low_pfn++) {
-struct page *page;
-
/* give a chance to irqs before checking need_resched() */
if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
if (should_release_lock(&zone->lru_lock)) {
@@ -444,6 +514,14 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
if (page_zone(page) != zone)
continue;

if (!valid_page)
valid_page = page;

/* If isolation recently failed, do not retry */
pageblock_nr = low_pfn >> pageblock_order;
if (!isolation_suitable(cc, page))
goto next_pageblock;

/* Skip if free */
if (PageBuddy(page))
continue;
@@ -453,7 +531,6 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
* migration is optimistic to see if the minimum amount of work
* satisfies the allocation
*/
-pageblock_nr = low_pfn >> pageblock_order;
if (!cc->sync && last_pageblock_nr != pageblock_nr &&
!migrate_async_suitable(get_pageblock_migratetype(page))) {
goto next_pageblock;
@@ -530,6 +607,10 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
if (locked)
spin_unlock_irqrestore(&zone->lru_lock, flags);

/* Update the pageblock-skip if the whole pageblock was scanned */
if (low_pfn == end_pfn)
update_pageblock_skip(valid_page, nr_isolated);

trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

return low_pfn;
@@ -593,6 +674,10 @@ static void isolate_freepages(struct zone *zone,
if (!suitable_migration_target(page))
continue;

/* If isolation recently failed, do not retry */
if (!isolation_suitable(cc, page))
continue;

/* Found a block suitable for isolating free pages from */
isolated = 0;
end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
@@ -709,8 +794,10 @@ static int compact_finished(struct zone *zone,
return COMPACT_PARTIAL;

/* Compaction run completes if the migrate and free scanner meet */
-if (cc->free_pfn <= cc->migrate_pfn)
+if (cc->free_pfn <= cc->migrate_pfn) {
+reset_isolation_suitable(cc->zone);
return COMPACT_COMPLETE;
+}

/*
* order == -1 is expected when compacting via
@@ -818,6 +905,10 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
cc->free_pfn &= ~(pageblock_nr_pages-1);

/* Clear pageblock skip if there are numerous alloc failures */
if (zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT)
reset_isolation_suitable(zone);

migrate_prep_local();

while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
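
Taken together, these compaction.c changes give each skip bit a three-stage lifecycle: set by update_pageblock_skip() after a fruitless full scan of a block, consulted by isolation_suitable() before any per-page work, and cleared in bulk by reset_isolation_suitable() either when the scanners meet in compact_finished() or when compact_defer_shift maxes out in compact_zone(). A compressed paraphrase of that flow, not verbatim kernel code (isolate_block() stands in for the real scanners):

static void compact_zone_sketch(struct zone *zone, struct compact_control *cc)
{
        /* Bulk clear when allocations keep failing despite deferral */
        if (zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT)
                reset_isolation_suitable(zone);

        while (cc->migrate_pfn < cc->free_pfn) {
                struct page *page = pfn_to_page(cc->migrate_pfn);

                /* Consult the hint before scanning the block at all */
                if (isolation_suitable(cc, page)) {
                        unsigned long nr = isolate_block(cc, page);

                        /* A fruitless full scan sets the hint */
                        update_pageblock_skip(page, nr);
                }
                cc->migrate_pfn += pageblock_nr_pages;
        }

        /* Scanners met: drop the now-stale hints (rate-limited to 5s) */
        reset_isolation_suitable(zone);
}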
4 changes: 3 additions & 1 deletion trunk/mm/internal.h
@@ -120,6 +120,7 @@ struct compact_control {
unsigned long free_pfn; /* isolate_freepages search base */
unsigned long migrate_pfn; /* isolate_migratepages search base */
bool sync; /* Synchronous migration */
bool ignore_skip_hint; /* Scan blocks even if marked skip */

int order; /* order a direct compactor needs */
int migratetype; /* MOVABLE, RECLAIMABLE etc */
@@ -129,7 +130,8 @@ struct compact_control {
};

unsigned long
-isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn);
+isolate_freepages_range(struct compact_control *cc,
+unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
unsigned long low_pfn, unsigned long end_pfn);
38 changes: 20 additions & 18 deletions trunk/mm/page_alloc.c
@@ -5679,33 +5679,26 @@ __alloc_contig_migrate_alloc(struct page *page, unsigned long private,
}

/* [start, end) must belong to a single zone. */
-static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
+static int __alloc_contig_migrate_range(struct compact_control *cc,
+unsigned long start, unsigned long end)
{
/* This function is based on compact_zone() from compaction.c. */

unsigned long pfn = start;
unsigned int tries = 0;
int ret = 0;

-struct compact_control cc = {
-.nr_migratepages = 0,
-.order = -1,
-.zone = page_zone(pfn_to_page(start)),
-.sync = true,
-};
-INIT_LIST_HEAD(&cc.migratepages);
-
migrate_prep_local();

-while (pfn < end || !list_empty(&cc.migratepages)) {
+while (pfn < end || !list_empty(&cc->migratepages)) {
if (fatal_signal_pending(current)) {
ret = -EINTR;
break;
}

-if (list_empty(&cc.migratepages)) {
-cc.nr_migratepages = 0;
-pfn = isolate_migratepages_range(cc.zone, &cc,
+if (list_empty(&cc->migratepages)) {
+cc->nr_migratepages = 0;
+pfn = isolate_migratepages_range(cc->zone, cc,
pfn, end);
if (!pfn) {
ret = -EINTR;
@@ -5717,14 +5710,14 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
break;
}

-reclaim_clean_pages_from_list(cc.zone, &cc.migratepages);
+reclaim_clean_pages_from_list(cc->zone, &cc->migratepages);

-ret = migrate_pages(&cc.migratepages,
+ret = migrate_pages(&cc->migratepages,
__alloc_contig_migrate_alloc,
0, false, MIGRATE_SYNC);
}

-putback_lru_pages(&cc.migratepages);
+putback_lru_pages(&cc->migratepages);
return ret > 0 ? 0 : ret;
}

@@ -5803,6 +5796,15 @@ int alloc_contig_range(unsigned long start, unsigned long end,
unsigned long outer_start, outer_end;
int ret = 0, order;

struct compact_control cc = {
.nr_migratepages = 0,
.order = -1,
.zone = page_zone(pfn_to_page(start)),
.sync = true,
.ignore_skip_hint = true,
};
INIT_LIST_HEAD(&cc.migratepages);

/*
* What we do here is we mark all pageblocks in range as
* MIGRATE_ISOLATE. Because pageblock and max order pages may
@@ -5832,7 +5834,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
if (ret)
goto done;

-ret = __alloc_contig_migrate_range(start, end);
+ret = __alloc_contig_migrate_range(&cc, start, end);
if (ret)
goto done;

@@ -5881,7 +5883,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
__reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);

/* Grab isolated pages from freelists. */
-outer_end = isolate_freepages_range(outer_start, end);
+outer_end = isolate_freepages_range(&cc, outer_start, end);
if (!outer_end) {
ret = -EBUSY;
goto done;
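
Hoisting compact_control out of __alloc_contig_migrate_range() and into alloc_contig_range() lets the migration and free-page phases share one zone pointer and one migratepages list, and it is where ignore_skip_hint is set: a CMA-style request must scan its exact target range even if compaction recently marked those blocks as skippable. A simplified sketch of the resulting call chain (pageblock isolation and most error handling omitted):

/* Sketch only; the real alloc_contig_range() does considerably more */
static int contig_range_sketch(unsigned long start, unsigned long end)
{
        int ret;
        struct compact_control cc = {
                .nr_migratepages = 0,
                .order = -1,                    /* range request, not order-based */
                .zone = page_zone(pfn_to_page(start)),
                .sync = true,
                .ignore_skip_hint = true,       /* scan blocks marked "skip" too */
        };
        INIT_LIST_HEAD(&cc.migratepages);

        /* Phase 1: migrate everything movable out of [start, end) */
        ret = __alloc_contig_migrate_range(&cc, start, end);
        if (ret)
                return ret;

        /* Phase 2: pull the now-free pages off the buddy freelists */
        if (!isolate_freepages_range(&cc, start, end))
                return -EBUSY;
        return 0;
}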
