Commit 42a9269

---
r: 332437
b: refs/heads/master
c: f40d1e4
h: refs/heads/master
i:
  332435: 398b79a
v: v3
Mel Gorman authored and Linus Torvalds committed Oct 9, 2012
1 parent 3505c2e commit 42a9269
Showing 2 changed files with 77 additions and 65 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 2a1402aa044b55c2d30ab0ed9405693ef06fb07c
+refs/heads/master: f40d1e42bb988d2a26e8e111ea4c4c7bac819b7e
140 changes: 76 additions & 64 deletions trunk/mm/compaction.c
@@ -93,6 +93,27 @@ static inline bool compact_trylock_irqsave(spinlock_t *lock,
 	return compact_checklock_irqsave(lock, flags, false, cc);
 }
 
+/* Returns true if the page is within a block suitable for migration to */
+static bool suitable_migration_target(struct page *page)
+{
+	int migratetype = get_pageblock_migratetype(page);
+
+	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
+	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
+		return false;
+
+	/* If the page is a large free page, then allow migration */
+	if (PageBuddy(page) && page_order(page) >= pageblock_order)
+		return true;
+
+	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
+	if (migrate_async_suitable(migratetype))
+		return true;
+
+	/* Otherwise skip the block */
+	return false;
+}
+
 static void compact_capture_page(struct compact_control *cc)
 {
 	unsigned long flags;
@@ -153,38 +174,56 @@ static void compact_capture_page(struct compact_control *cc)
  * pages inside of the pageblock (even though it may still end up isolating
  * some pages).
  */
-static unsigned long isolate_freepages_block(unsigned long blockpfn,
+static unsigned long isolate_freepages_block(struct compact_control *cc,
+				unsigned long blockpfn,
 				unsigned long end_pfn,
 				struct list_head *freelist,
 				bool strict)
 {
 	int nr_scanned = 0, total_isolated = 0;
 	struct page *cursor;
+	unsigned long nr_strict_required = end_pfn - blockpfn;
+	unsigned long flags;
+	bool locked = false;
 
 	cursor = pfn_to_page(blockpfn);
 
-	/* Isolate free pages. This assumes the block is valid */
+	/* Isolate free pages. */
 	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
 		int isolated, i;
 		struct page *page = cursor;
 
-		if (!pfn_valid_within(blockpfn)) {
-			if (strict)
-				return 0;
-			continue;
-		}
 		nr_scanned++;
+		if (!pfn_valid_within(blockpfn))
+			continue;
+		if (!PageBuddy(page))
+			continue;
+
+		/*
+		 * The zone lock must be held to isolate freepages.
+		 * Unfortunately this is a very coarse lock and can be
+		 * heavily contended if there are parallel allocations
+		 * or parallel compactions. For async compaction do not
+		 * spin on the lock and we acquire the lock as late as
+		 * possible.
+		 */
+		locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
+								locked, cc);
+		if (!locked)
+			break;
+
+		/* Recheck this is a suitable migration target under lock */
+		if (!strict && !suitable_migration_target(page))
+			break;
 
-		if (!PageBuddy(page)) {
-			if (strict)
-				return 0;
+		/* Recheck this is a buddy page under lock */
+		if (!PageBuddy(page))
 			continue;
-		}
 
 		/* Found a free page, break it into order-0 pages */
 		isolated = split_free_page(page);
 		if (!isolated && strict)
-			return 0;
+			break;
 		total_isolated += isolated;
 		for (i = 0; i < isolated; i++) {
 			list_add(&page->lru, freelist);
@@ -199,6 +238,18 @@ static unsigned long isolate_freepages_block(unsigned long blockpfn,
 	}
 
 	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
+
+	/*
+	 * If strict isolation is requested by CMA then check that all the
+	 * pages requested were isolated. If there were any failures, 0 is
+	 * returned and CMA will fail.
+	 */
+	if (strict && nr_strict_required != total_isolated)
+		total_isolated = 0;
+
+	if (locked)
+		spin_unlock_irqrestore(&cc->zone->lock, flags);
+
 	return total_isolated;
 }
 
@@ -218,12 +269,17 @@ static unsigned long isolate_freepages_block(unsigned long blockpfn,
 unsigned long
 isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
 {
-	unsigned long isolated, pfn, block_end_pfn, flags;
+	unsigned long isolated, pfn, block_end_pfn;
 	struct zone *zone = NULL;
 	LIST_HEAD(freelist);
 
+	/* cc needed for isolate_freepages_block to acquire zone->lock */
+	struct compact_control cc = {
+		.sync = true,
+	};
+
 	if (pfn_valid(start_pfn))
-		zone = page_zone(pfn_to_page(start_pfn));
+		cc.zone = zone = page_zone(pfn_to_page(start_pfn));
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
 		if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
@@ -236,10 +292,8 @@ isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
 		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
 		block_end_pfn = min(block_end_pfn, end_pfn);
 
-		spin_lock_irqsave(&zone->lock, flags);
-		isolated = isolate_freepages_block(pfn, block_end_pfn,
+		isolated = isolate_freepages_block(&cc, pfn, block_end_pfn,
 							&freelist, true);
-		spin_unlock_irqrestore(&zone->lock, flags);
 
 		/*
 		 * In strict mode, isolate_freepages_block() returns 0 if
@@ -483,29 +537,6 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 
 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
 #ifdef CONFIG_COMPACTION
-
-/* Returns true if the page is within a block suitable for migration to */
-static bool suitable_migration_target(struct page *page)
-{
-
-	int migratetype = get_pageblock_migratetype(page);
-
-	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
-	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
-		return false;
-
-	/* If the page is a large free page, then allow migration */
-	if (PageBuddy(page) && page_order(page) >= pageblock_order)
-		return true;
-
-	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
-	if (migrate_async_suitable(migratetype))
-		return true;
-
-	/* Otherwise skip the block */
-	return false;
-}
-
 /*
  * Returns the start pfn of the last page block in a zone. This is the starting
  * point for full compaction of a zone. Compaction searches for free pages from
@@ -529,7 +560,6 @@ static void isolate_freepages(struct zone *zone,
 {
 	struct page *page;
 	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
-	unsigned long flags;
 	int nr_freepages = cc->nr_freepages;
 	struct list_head *freelist = &cc->freepages;
 
@@ -577,30 +607,12 @@
 		if (!suitable_migration_target(page))
 			continue;
 
-		/*
-		 * Found a block suitable for isolating free pages from. Now
-		 * we disabled interrupts, double check things are ok and
-		 * isolate the pages. This is to minimise the time IRQs
-		 * are disabled
-		 */
+		/* Found a block suitable for isolating free pages from */
 		isolated = 0;
-
-		/*
-		 * The zone lock must be held to isolate freepages. This
-		 * unfortunately this is a very coarse lock and can be
-		 * heavily contended if there are parallel allocations
-		 * or parallel compactions. For async compaction do not
-		 * spin on the lock
-		 */
-		if (!compact_trylock_irqsave(&zone->lock, &flags, cc))
-			break;
-		if (suitable_migration_target(page)) {
-			end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
-			isolated = isolate_freepages_block(pfn, end_pfn,
-							freelist, false);
-			nr_freepages += isolated;
-		}
-		spin_unlock_irqrestore(&zone->lock, flags);
+		end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
+		isolated = isolate_freepages_block(cc, pfn, end_pfn,
+						freelist, false);
+		nr_freepages += isolated;
 
 		/*
 		 * Record the highest PFN we isolated pages from. When next

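For readers who want the locking idea in isolation: the change above makes isolate_freepages_block() take the coarse zone->lock itself, as late as possible (only once a candidate page is found), recheck the candidate under the lock, keep the lock for the rest of the scan, and drop it once at the end; asynchronous compaction backs off on contention instead of spinning. The sketch below is a minimal userspace illustration of that pattern using plain pthreads and hypothetical names (struct pool, looks_free(), claim_free_items()); it is an analogy under those assumptions, not kernel code.

/*
 * Illustrative sketch only (hypothetical names, plain pthreads), showing
 * "acquire the coarse lock as late as possible, recheck under it, release
 * once at the end" as applied by isolate_freepages_block() above.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NITEMS 256

struct item {
	bool free;			/* may change until pool->lock is held */
};

struct pool {
	pthread_mutex_t lock;		/* coarse, potentially contended lock */
	struct item items[NITEMS];
};

/* Lockless hint; only authoritative once pool->lock is held. */
static bool looks_free(const struct item *it)
{
	return it->free;
}

static size_t claim_free_items(struct pool *p, size_t *out, size_t max,
			       bool can_block)
{
	bool locked = false;
	size_t n = 0;

	for (size_t i = 0; i < NITEMS && n < max; i++) {
		/* Cheap lockless filter: skip obvious non-candidates. */
		if (!looks_free(&p->items[i]))
			continue;

		/* Take the coarse lock only when the first candidate appears. */
		if (!locked) {
			if (can_block)
				pthread_mutex_lock(&p->lock);
			else if (pthread_mutex_trylock(&p->lock) != 0)
				break;	/* contended: back off, as async compaction does */
			locked = true;
		}

		/* Recheck under the lock; the item may have changed meanwhile. */
		if (!looks_free(&p->items[i]))
			continue;

		p->items[i].free = false;	/* claim it */
		out[n++] = i;
	}

	/* Drop the lock once, after the whole scan. */
	if (locked)
		pthread_mutex_unlock(&p->lock);

	return n;
}

int main(void)
{
	static struct pool p;
	size_t out[8];

	pthread_mutex_init(&p.lock, NULL);
	p.items[3].free = true;
	p.items[7].free = true;

	printf("claimed %zu items\n", claim_free_items(&p, out, 8, true));

	pthread_mutex_destroy(&p.lock);
	return 0;
}

The can_block flag plays the role of cc->sync in compact_checklock_irqsave(): synchronous callers may wait for the lock, while asynchronous ones abort the scan rather than spin. The strict/CMA part of the patch is orthogonal: after the scan, isolate_freepages_block() returns 0 unless every requested page was isolated.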