
Commit 9bda5f5

---
yaml
---
r: 322087
b: refs/heads/master
c: c67fe37
h: refs/heads/master
i:
  322085: 81f5245
  322083: c66e455
  322079: b50968b
v: v3
Mel Gorman authored and Linus Torvalds committed Aug 21, 2012
1 parent 419ba0c commit 9bda5f5
Showing 5 changed files with 94 additions and 30 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: de74f1cc3b1e9730d9b58580cd11361d30cd182d
refs/heads/master: c67fe3752abe6ab47639e2f9b836900c3dc3da84
4 changes: 2 additions & 2 deletions trunk/include/linux/compaction.h
@@ -22,7 +22,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *mask,
bool sync);
bool sync, bool *contended);
extern int compact_pgdat(pg_data_t *pgdat, int order);
extern unsigned long compaction_suitable(struct zone *zone, int order);

@@ -64,7 +64,7 @@ static inline bool compaction_deferred(struct zone *zone, int order)
#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *nodemask,
bool sync)
bool sync, bool *contended)
{
return COMPACT_CONTINUE;
}
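For orientation: this header hunk is the entire API change. try_to_compact_pages() (and its no-op stub when compaction is compiled out) now takes a bool *contended out-parameter. The stub never writes to it, so callers are expected to initialise the flag to false themselves, which is exactly what __alloc_pages_slowpath() does further down. A minimal, hedged sketch of that contract, with invented names:

```c
#include <stdbool.h>

/*
 * Hypothetical sketch of the stubbed-out (!CONFIG_COMPACTION) path: the
 * out-parameter is accepted but never written, so the caller's own
 * "bool contended = false;" is what the flag ends up meaning.
 */
static inline unsigned long try_to_compact_pages_sketch(int order, bool sync,
                                                        bool *contended)
{
    (void)order;
    (void)sync;
    (void)contended;    /* deliberately left untouched */
    return 0;           /* placeholder; the real stub returns COMPACT_CONTINUE */
}
```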
100 changes: 79 additions & 21 deletions trunk/mm/compaction.c
@@ -50,6 +50,47 @@ static inline bool migrate_async_suitable(int migratetype)
return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

/*
* Compaction requires the taking of some coarse locks that are potentially
* very heavily contended. Check if the process needs to be scheduled or
* if the lock is contended. For async compaction, back out in the event
* of severe contention. For sync compaction, schedule.
*
* Returns true if the lock is held.
* Returns false if the lock is released and compaction should abort
*/
static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
bool locked, struct compact_control *cc)
{
if (need_resched() || spin_is_contended(lock)) {
if (locked) {
spin_unlock_irqrestore(lock, *flags);
locked = false;
}

/* async aborts if taking too long or contended */
if (!cc->sync) {
if (cc->contended)
*cc->contended = true;
return false;
}

cond_resched();
if (fatal_signal_pending(current))
return false;
}

if (!locked)
spin_lock_irqsave(lock, *flags);
return true;
}

static inline bool compact_trylock_irqsave(spinlock_t *lock,
unsigned long *flags, struct compact_control *cc)
{
return compact_checklock_irqsave(lock, flags, false, cc);
}
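The helper above is the core of the patch: if the current task needs to reschedule or the lock is contended, any lock already held is dropped; an async compaction pass then records the fact through cc->contended and gives up, while a sync pass reschedules (bailing only on a fatal signal) before taking the lock again. A rough user-space analogue of that policy, using a pthread mutex instead of an IRQ-disabling spinlock and skipping the fatal-signal check, is sketched below; all names are illustrative:

```c
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

struct scan_control {
    bool sync;          /* sync scans may wait; async scans must not */
    bool *contended;    /* optional out-flag, like cc->contended */
};

/*
 * Returns true with *lock held, or false if an async scan should abort.
 * Mirrors the shape of compact_checklock_irqsave(): probe for contention,
 * drop out early when impatient, otherwise yield and take the lock.
 */
bool check_lock_or_back_off(pthread_mutex_t *lock, bool locked,
                            struct scan_control *sc)
{
    if (locked)
        return true;    /* already ours, keep going */

    /* Cheap contention probe: try to take the lock without blocking. */
    if (pthread_mutex_trylock(lock) == 0)
        return true;

    if (!sc->sync) {    /* async: record the contention and bail */
        if (sc->contended)
            *sc->contended = true;
        return false;
    }

    sched_yield();      /* sync: give the CPU up, then wait */
    pthread_mutex_lock(lock);
    return true;
}
```

The kernel helper goes one step further and drops an already-held lock before deciding, since sitting on a contended, IRQ-disabling spinlock is precisely what the patch is trying to avoid.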

/*
* Isolate free pages onto a private freelist. Caller must hold zone->lock.
* If @strict is true, will abort returning 0 on any invalid PFNs or non-free
@@ -173,16 +214,22 @@ isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
{
struct page *page;
unsigned int count[2] = { 0, };

list_for_each_entry(page, &cc->migratepages, lru)
count[!!page_is_file_cache(page)]++;

__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
/* If locked we can use the interrupt unsafe versions */
if (locked) {
__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
} else {
mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}
}
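acct_isolated() now needs to be told whether the caller still holds zone->lru_lock: the __mod_zone_page_state() variants are cheaper but only safe while interrupts are disabled by that lock, so once the scan may have dropped the lock the interrupt-safe mod_zone_page_state() has to be used instead. There is no exact user-space equivalent of IRQ safety, but the general shape, pick the cheap update while a lock serialises all writers and the safe one otherwise, looks roughly like this sketch with hypothetical names:

```c
#include <stdatomic.h>
#include <stdbool.h>

/* Counters standing in for the per-zone NR_ISOLATED_ANON/_FILE statistics. */
static _Atomic long isolated_anon;
static _Atomic long isolated_file;

/*
 * While `locked` is true the caller still holds the lock that excludes all
 * other updaters, so a relaxed add is enough; otherwise use the default
 * (stronger) atomic add. Loosely mirrors __mod_zone_page_state() vs
 * mod_zone_page_state() in acct_isolated().
 */
void account_isolated(long anon, long file, bool locked)
{
    if (locked) {
        atomic_fetch_add_explicit(&isolated_anon, anon, memory_order_relaxed);
        atomic_fetch_add_explicit(&isolated_file, file, memory_order_relaxed);
    } else {
        atomic_fetch_add(&isolated_anon, anon);
        atomic_fetch_add(&isolated_file, file);
    }
}
```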

/* Similar to reclaim, but different enough that they don't share logic */
@@ -228,6 +275,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
struct list_head *migratelist = &cc->migratepages;
isolate_mode_t mode = 0;
struct lruvec *lruvec;
unsigned long flags;
bool locked;

/*
* Ensure that there are not too many pages isolated from the LRU
@@ -247,25 +296,22 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,

/* Time to isolate some pages for migration */
cond_resched();
spin_lock_irq(&zone->lru_lock);
spin_lock_irqsave(&zone->lru_lock, flags);
locked = true;
for (; low_pfn < end_pfn; low_pfn++) {
struct page *page;
bool locked = true;

/* give a chance to irqs before checking need_resched() */
if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) {
spin_unlock_irq(&zone->lru_lock);
spin_unlock_irqrestore(&zone->lru_lock, flags);
locked = false;
}
if (need_resched() || spin_is_contended(&zone->lru_lock)) {
if (locked)
spin_unlock_irq(&zone->lru_lock);
cond_resched();
spin_lock_irq(&zone->lru_lock);
if (fatal_signal_pending(current))
break;
} else if (!locked)
spin_lock_irq(&zone->lru_lock);

/* Check if it is ok to still hold the lock */
locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
locked, cc);
if (!locked)
break;

/*
* migrate_pfn does not necessarily start aligned to a
@@ -349,9 +395,10 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
}
}

acct_isolated(zone, cc);
acct_isolated(zone, locked, cc);

spin_unlock_irq(&zone->lru_lock);
if (locked)
spin_unlock_irqrestore(&zone->lru_lock, flags);

trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
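Putting the pieces together: the migration scanner now takes lru_lock with spin_lock_irqsave(), voluntarily drops it every SWAP_CLUSTER_MAX pages so interrupts get a chance, and re-takes it through compact_checklock_irqsave(), breaking out of the scan entirely if that fails, before the final unlock, which only happens if the lock is still held. A self-contained user-space sketch of the same scan pattern (illustrative names, pthread mutex instead of a spinlock):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

#define DROP_INTERVAL 32        /* stand-in for SWAP_CLUSTER_MAX */

/*
 * Visit `nr` items under `lock`, periodically dropping the lock so other
 * threads get a turn. A caller in a hurry (`impatient`, like async
 * compaction) abandons the scan when the lock turns out to be contended.
 * Returns the number of items actually visited.
 */
size_t scan_items(int *items, size_t nr, pthread_mutex_t *lock, bool impatient)
{
    size_t i;
    bool locked;

    pthread_mutex_lock(lock);
    locked = true;

    for (i = 0; i < nr; i++) {
        /* Give others a chance before starting the next batch. */
        if (i && (i % DROP_INTERVAL) == 0) {
            pthread_mutex_unlock(lock);
            locked = false;
        }

        if (!locked) {
            if (pthread_mutex_trylock(lock) == 0)
                locked = true;
            else if (impatient)
                break;                    /* contended: bail out early */
            else {
                pthread_mutex_lock(lock); /* patient: just wait */
                locked = true;
            }
        }

        items[i]++;                       /* "isolate" the item */
    }

    if (locked)
        pthread_mutex_unlock(lock);
    return i;
}
```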

@@ -461,7 +508,16 @@ static void isolate_freepages(struct zone *zone,
* are disabled
*/
isolated = 0;
spin_lock_irqsave(&zone->lock, flags);

/*
* The zone lock must be held to isolate freepages.
* Unfortunately this is a very coarse lock and can be
* heavily contended if there are parallel allocations
* or parallel compactions. For async compaction do not
* spin on the lock
*/
if (!compact_trylock_irqsave(&zone->lock, &flags, cc))
break;
if (suitable_migration_target(page)) {
end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
isolated = isolate_freepages_block(pfn, end_pfn,
@@ -773,7 +829,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)

static unsigned long compact_zone_order(struct zone *zone,
int order, gfp_t gfp_mask,
bool sync)
bool sync, bool *contended)
{
struct compact_control cc = {
.nr_freepages = 0,
@@ -782,6 +838,7 @@ static unsigned long compact_zone_order(struct zone *zone,
.migratetype = allocflags_to_migratetype(gfp_mask),
.zone = zone,
.sync = sync,
.contended = contended,
};
INIT_LIST_HEAD(&cc.freepages);
INIT_LIST_HEAD(&cc.migratepages);
@@ -803,7 +860,7 @@ int sysctl_extfrag_threshold = 500;
*/
unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *nodemask,
bool sync)
bool sync, bool *contended)
{
enum zone_type high_zoneidx = gfp_zone(gfp_mask);
int may_enter_fs = gfp_mask & __GFP_FS;
@@ -827,7 +884,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
nodemask) {
int status;

status = compact_zone_order(zone, order, gfp_mask, sync);
status = compact_zone_order(zone, order, gfp_mask, sync,
contended);
rc = max(status, rc);

/* If a normal allocation would succeed, stop compacting */
1 change: 1 addition & 0 deletions trunk/mm/internal.h
@@ -130,6 +130,7 @@ struct compact_control {
int order; /* order a direct compactor needs */
int migratetype; /* MOVABLE, RECLAIMABLE etc */
struct zone *zone;
bool *contended; /* True if a lock was contended */
};

unsigned long
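The new contended member of struct compact_control is just a pointer back at the caller's flag; compact_zone_order() fills it in alongside the existing fields so the lock helpers deep inside the zone scan can report contention without threading another argument through every level. A trimmed-down sketch of that wiring, using hypothetical "_lite" names:

```c
#include <stdbool.h>

/* Trimmed-down stand-in for struct compact_control. */
struct compact_control_lite {
    unsigned long nr_freepages;
    unsigned long nr_migratepages;
    int order;
    bool sync;
    bool *contended;    /* points back at the caller's flag */
};

/* Mirrors how compact_zone_order() hands the caller's flag to the scanner. */
unsigned long compact_zone_order_lite(int order, bool sync, bool *contended)
{
    struct compact_control_lite cc = {
        .order = order,
        .sync = sync,
        .contended = contended,
    };

    /* ... compact_zone(&cc) would run here and may set *cc.contended ... */
    (void)cc;
    return 0;
}
```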
17 changes: 11 additions & 6 deletions trunk/mm/page_alloc.c
@@ -2102,7 +2102,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
int migratetype, bool sync_migration,
bool *deferred_compaction,
bool *contended_compaction, bool *deferred_compaction,
unsigned long *did_some_progress)
{
struct page *page;
@@ -2117,7 +2117,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,

current->flags |= PF_MEMALLOC;
*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
nodemask, sync_migration);
nodemask, sync_migration,
contended_compaction);
current->flags &= ~PF_MEMALLOC;
if (*did_some_progress != COMPACT_SKIPPED) {

@@ -2163,7 +2164,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
int migratetype, bool sync_migration,
bool *deferred_compaction,
bool *contended_compaction, bool *deferred_compaction,
unsigned long *did_some_progress)
{
return NULL;
@@ -2336,6 +2337,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
unsigned long did_some_progress;
bool sync_migration = false;
bool deferred_compaction = false;
bool contended_compaction = false;

/*
* In the slowpath, we sanity check order to avoid ever trying to
@@ -2425,6 +2427,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
nodemask,
alloc_flags, preferred_zone,
migratetype, sync_migration,
&contended_compaction,
&deferred_compaction,
&did_some_progress);
if (page)
@@ -2434,10 +2437,11 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
/*
* If compaction is deferred for high-order allocations, it is because
* sync compaction recently failed. If this is the case and the caller
* has requested the system not be heavily disrupted, fail the
* allocation now instead of entering direct reclaim
* requested a movable allocation that does not heavily disrupt the
* system then fail the allocation instead of entering direct reclaim.
*/
if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
if ((deferred_compaction || contended_compaction) &&
(gfp_mask & __GFP_NO_KSWAPD))
goto nopage;

/* Try direct reclaim and then allocating */
@@ -2508,6 +2512,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
nodemask,
alloc_flags, preferred_zone,
migratetype, sync_migration,
&contended_compaction,
&deferred_compaction,
&did_some_progress);
if (page)
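End to end, __alloc_pages_slowpath() starts with contended_compaction = false, hands its address down through __alloc_pages_direct_compact() and try_to_compact_pages(), and then treats a contended async pass like a deferred one: if the caller set __GFP_NO_KSWAPD, i.e. asked that the system not be heavily disrupted, the allocation fails fast instead of falling into direct reclaim. A runnable, heavily simplified user-space sketch of that decision; the flag names and helpers here are invented for illustration:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Invented flag standing in for __GFP_NO_KSWAPD ("keep disruption low"). */
#define ALLOC_LOW_DISRUPTION 0x1u

/* Pretend async compaction pass that always finds its locks contended. */
static void *try_compact(unsigned order, bool sync, bool *contended)
{
    (void)order;
    if (!sync && contended)
        *contended = true;
    return NULL;                /* no page freed up */
}

/* Shape of the slow path: bail out before the expensive reclaim step. */
static void *alloc_slowpath(unsigned order, unsigned flags)
{
    bool contended = false;
    void *page = try_compact(order, /*sync=*/false, &contended);

    if (page)
        return page;

    if (contended && (flags & ALLOC_LOW_DISRUPTION))
        return NULL;            /* fail fast rather than disrupt the system */

    /* ... direct reclaim and sync compaction would follow here ... */
    return NULL;
}

int main(void)
{
    if (!alloc_slowpath(3, ALLOC_LOW_DISRUPTION))
        puts("allocation failed fast: async compaction was contended");
    return 0;
}
```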
