Commit e562365

---
yaml
---
r: 343312
b: refs/heads/master
c: c8bf2d8
h: refs/heads/master
v: v3
Thierry Reding authored and Linus Torvalds committed Dec 13, 2012
1 parent cde28a7 commit e562365
Showing 2 changed files with 55 additions and 55 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 3ea41e6210fea3b234b6cb3e9443e75975850bbf
+refs/heads/master: c8bf2d8ba4fbc093de7c0d192fe5d2531f14b8b9
108 changes: 54 additions & 54 deletions trunk/mm/compaction.c
@@ -215,60 +215,6 @@ static bool suitable_migration_target(struct page *page)
	return false;
}

-static void compact_capture_page(struct compact_control *cc)
-{
-	unsigned long flags;
-	int mtype, mtype_low, mtype_high;
-
-	if (!cc->page || *cc->page)
-		return;
-
-	/*
-	 * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
-	 * regardless of the migratetype of the freelist it is captured from.
-	 * This is fine because the order for a high-order MIGRATE_MOVABLE
-	 * allocation is typically at least a pageblock size and overall
-	 * fragmentation is not impaired. Other allocation types must
-	 * capture pages from their own migratelist because otherwise they
-	 * could pollute other pageblocks like MIGRATE_MOVABLE with
-	 * difficult-to-move pages, making fragmentation worse overall.
-	 */
-	if (cc->migratetype == MIGRATE_MOVABLE) {
-		mtype_low = 0;
-		mtype_high = MIGRATE_PCPTYPES;
-	} else {
-		mtype_low = cc->migratetype;
-		mtype_high = cc->migratetype + 1;
-	}
-
-	/* Speculatively examine the free lists without zone lock */
-	for (mtype = mtype_low; mtype < mtype_high; mtype++) {
-		int order;
-		for (order = cc->order; order < MAX_ORDER; order++) {
-			struct page *page;
-			struct free_area *area;
-			area = &(cc->zone->free_area[order]);
-			if (list_empty(&area->free_list[mtype]))
-				continue;
-
-			/* Take the lock and attempt capture of the page */
-			if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
-				return;
-			if (!list_empty(&area->free_list[mtype])) {
-				page = list_entry(area->free_list[mtype].next,
-						  struct page, lru);
-				if (capture_free_page(page, cc->order, mtype)) {
-					spin_unlock_irqrestore(&cc->zone->lock,
-							       flags);
-					*cc->page = page;
-					return;
-				}
-			}
-			spin_unlock_irqrestore(&cc->zone->lock, flags);
-		}
-	}
-}

/*
 * Isolate free pages onto a private freelist. Caller must hold zone->lock.
 * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
@@ -953,6 +899,60 @@ unsigned long compaction_suitable(struct zone *zone, int order)
	return COMPACT_CONTINUE;
}

+static void compact_capture_page(struct compact_control *cc)
+{
+	unsigned long flags;
+	int mtype, mtype_low, mtype_high;
+
+	if (!cc->page || *cc->page)
+		return;
+
+	/*
+	 * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
+	 * regardless of the migratetype of the freelist it is captured from.
+	 * This is fine because the order for a high-order MIGRATE_MOVABLE
+	 * allocation is typically at least a pageblock size and overall
+	 * fragmentation is not impaired. Other allocation types must
+	 * capture pages from their own migratelist because otherwise they
+	 * could pollute other pageblocks like MIGRATE_MOVABLE with
+	 * difficult-to-move pages, making fragmentation worse overall.
+	 */
+	if (cc->migratetype == MIGRATE_MOVABLE) {
+		mtype_low = 0;
+		mtype_high = MIGRATE_PCPTYPES;
+	} else {
+		mtype_low = cc->migratetype;
+		mtype_high = cc->migratetype + 1;
+	}
+
+	/* Speculatively examine the free lists without zone lock */
+	for (mtype = mtype_low; mtype < mtype_high; mtype++) {
+		int order;
+		for (order = cc->order; order < MAX_ORDER; order++) {
+			struct page *page;
+			struct free_area *area;
+			area = &(cc->zone->free_area[order]);
+			if (list_empty(&area->free_list[mtype]))
+				continue;
+
+			/* Take the lock and attempt capture of the page */
+			if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
+				return;
+			if (!list_empty(&area->free_list[mtype])) {
+				page = list_entry(area->free_list[mtype].next,
+						  struct page, lru);
+				if (capture_free_page(page, cc->order, mtype)) {
+					spin_unlock_irqrestore(&cc->zone->lock,
+							       flags);
+					*cc->page = page;
+					return;
+				}
+			}
+			spin_unlock_irqrestore(&cc->zone->lock, flags);
+		}
+	}
+}

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;
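The moved compact_capture_page() above follows a common lock-avoidance idiom: peek at the free lists without taking zone->lock, and only when a candidate order/migratetype looks populated, trylock and re-check under the lock before consuming a page. Below is a minimal user-space sketch of that idiom; it is not kernel code, the names are illustrative, and a pthread mutex stands in for the zone spinlock and compact_trylock_irqsave().

/*
 * Sketch only: user-space analogue of the speculative-check-then-trylock
 * pattern in compact_capture_page().  All names are illustrative.
 * Build with: gcc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct node {
	struct node *next;
	int value;
};

static struct node *free_list;		/* shared list, protected by list_lock */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Try to capture one node; NULL means the list was empty or the lock busy. */
static struct node *capture_node(void)
{
	struct node *node = NULL;

	/* Speculative peek without the lock: cheap, but may be stale. */
	if (!free_list)
		return NULL;

	/* Only now pay for the lock; bail out on contention, trylock-style. */
	if (pthread_mutex_trylock(&list_lock) != 0)
		return NULL;

	/* Re-check under the lock: the list may have emptied meanwhile. */
	if (free_list) {
		node = free_list;
		free_list = node->next;
	}
	pthread_mutex_unlock(&list_lock);
	return node;
}

int main(void)
{
	struct node n = { .next = NULL, .value = 42 };
	struct node *got;

	free_list = &n;
	got = capture_node();
	printf("captured: %d\n", got ? got->value : -1);
	return 0;
}

As in the diff, a failed trylock simply aborts the capture attempt rather than spinning: the caller can retry later, and giving up is cheaper than adding contention on a hot lock.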
