Commit 7d1dea6

---
r: 332441
b: refs/heads/master
c: 6299702
h: refs/heads/master
i:
  332439: 2625201
v: v3
---
Mel Gorman authored and Linus Torvalds committed Oct 9, 2012
1 parent de7720a commit 7d1dea6
Showing 6 changed files with 61 additions and 18 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c89511ab2f8fe2b47585e60da8af7fd213ec877e
+refs/heads/master: 62997027ca5b3d4618198ed8b1aba40b61b1137b
15 changes: 15 additions & 0 deletions trunk/include/linux/compaction.h
@@ -24,6 +24,7 @@ extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
 			int order, gfp_t gfp_mask, nodemask_t *mask,
 			bool sync, bool *contended, struct page **page);
 extern int compact_pgdat(pg_data_t *pgdat, int order);
+extern void reset_isolation_suitable(pg_data_t *pgdat);
 extern unsigned long compaction_suitable(struct zone *zone, int order);
 
 /* Do not skip compaction more than 64 times */
@@ -61,6 +62,16 @@ static inline bool compaction_deferred(struct zone *zone, int order)
 	return zone->compact_considered < defer_limit;
 }
 
+/* Returns true if restarting compaction after many failures */
+static inline bool compaction_restarting(struct zone *zone, int order)
+{
+	if (order < zone->compact_order_failed)
+		return false;
+
+	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
+		zone->compact_considered >= 1UL << zone->compact_defer_shift;
+}
+
 #else
 static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
 			int order, gfp_t gfp_mask, nodemask_t *nodemask,
@@ -74,6 +85,10 @@ static inline int compact_pgdat(pg_data_t *pgdat, int order)
 	return COMPACT_CONTINUE;
 }
 
+static inline void reset_isolation_suitable(pg_data_t *pgdat)
+{
+}
+
 static inline unsigned long compaction_suitable(struct zone *zone, int order)
 {
 	return COMPACT_SKIPPED;
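The new compaction_restarting() helper keys off the existing deferral back-off: each failed compaction doubles the number of allocation attempts for which compaction is skipped, capped at 1 << COMPACT_MAX_DEFER_SHIFT (the "do not skip more than 64 times" limit above), and a restart is only declared once that cap has been both reached and exhausted. Below is a minimal userspace sketch of that interaction, for illustration only; zone_model, the constant value and this defer_compaction() are simplified stand-ins for the kernel's own definitions.

/*
 * Userspace model of the deferral counters consulted by
 * compaction_restarting().  COMPACT_MAX_DEFER_SHIFT, the field names
 * and defer_compaction() mirror mm/compaction.c but are simplified.
 */
#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6	/* skip at most 1 << 6 = 64 times */

struct zone_model {
	unsigned long compact_considered;
	unsigned int compact_defer_shift;
	int compact_order_failed;
};

/* Called when compaction fails: double the back-off window, capped at 64 */
static void defer_compaction(struct zone_model *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Same test as the new helper in compaction.h */
static bool compaction_restarting(struct zone_model *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}

int main(void)
{
	struct zone_model zone = { 0, 0, 2 };
	int i;

	/* Fail often enough to saturate the back-off... */
	for (i = 0; i < 10; i++)
		defer_compaction(&zone, 2);

	/* ...then sit through a full deferral window of 64 attempts */
	zone.compact_considered = 1UL << zone.compact_defer_shift;

	printf("restart full scan? %s\n",
	       compaction_restarting(&zone, 2) ? "yes" : "no");
	return 0;
}

Compiled standalone, this prints "restart full scan? yes" exactly when the back-off is saturated, which is the point at which the new compact_zone() code (below) wipes the PG_migrate_skip hints.
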
3 changes: 2 additions & 1 deletion trunk/include/linux/mmzone.h
@@ -370,7 +370,8 @@ struct zone {
 	spinlock_t		lock;
 	int			all_unreclaimable; /* All pages pinned */
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
-	unsigned long		compact_blockskip_expire;
+	/* Set to true when the PG_migrate_skip bits should be cleared */
+	bool			compact_blockskip_flush;
 
 	/* pfns where compaction scanners should start */
 	unsigned long		compact_cached_free_pfn;
50 changes: 34 additions & 16 deletions trunk/mm/compaction.c
@@ -66,24 +66,15 @@ static inline bool isolation_suitable(struct compact_control *cc,
  * should be skipped for page isolation when the migrate and free page scanner
  * meet.
  */
-static void reset_isolation_suitable(struct zone *zone)
+static void __reset_isolation_suitable(struct zone *zone)
 {
 	unsigned long start_pfn = zone->zone_start_pfn;
 	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
 	unsigned long pfn;
 
-	/*
-	 * Do not reset more than once every five seconds. If allocations are
-	 * failing sufficiently quickly to allow this to happen then continually
-	 * scanning for compaction is not going to help. The choice of five
-	 * seconds is arbitrary but will mitigate excessive scanning.
-	 */
-	if (time_before(jiffies, zone->compact_blockskip_expire))
-		return;
-
 	zone->compact_cached_migrate_pfn = start_pfn;
 	zone->compact_cached_free_pfn = end_pfn;
-	zone->compact_blockskip_expire = jiffies + (HZ * 5);
+	zone->compact_blockskip_flush = false;
 
 	/* Walk the zone and mark every pageblock as suitable for isolation */
 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
@@ -102,9 +93,24 @@ static void reset_isolation_suitable(struct zone *zone)
 	}
 }
 
+void reset_isolation_suitable(pg_data_t *pgdat)
+{
+	int zoneid;
+
+	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
+		struct zone *zone = &pgdat->node_zones[zoneid];
+		if (!populated_zone(zone))
+			continue;
+
+		/* Only flush if a full compaction finished recently */
+		if (zone->compact_blockskip_flush)
+			__reset_isolation_suitable(zone);
+	}
+}
+
 /*
  * If no pages were isolated then mark this pageblock to be skipped in the
- * future. The information is later cleared by reset_isolation_suitable().
+ * future. The information is later cleared by __reset_isolation_suitable().
  */
 static void update_pageblock_skip(struct compact_control *cc,
 			struct page *page, unsigned long nr_isolated,
@@ -820,7 +826,15 @@ static int compact_finished(struct zone *zone,
 
 	/* Compaction run completes if the migrate and free scanner meet */
 	if (cc->free_pfn <= cc->migrate_pfn) {
-		reset_isolation_suitable(cc->zone);
+		/*
+		 * Mark that the PG_migrate_skip information should be cleared
+		 * by kswapd when it goes to sleep. kswapd does not set the
+		 * flag itself as the decision to be clear should be directly
+		 * based on an allocation request.
+		 */
+		if (!current_is_kswapd())
+			zone->compact_blockskip_flush = true;
+
 		return COMPACT_COMPLETE;
 	}
 
@@ -943,9 +957,13 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 		zone->compact_cached_migrate_pfn = cc->migrate_pfn;
 	}
 
-	/* Clear pageblock skip if there are numerous alloc failures */
-	if (zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT)
-		reset_isolation_suitable(zone);
+	/*
+	 * Clear pageblock skip if there were failures recently and compaction
+	 * is about to be retried after being deferred. kswapd does not do
+	 * this reset as it'll reset the cached information when going to sleep.
+	 */
+	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
+		__reset_isolation_suitable(zone);
 
 	migrate_prep_local();
 
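In the kernel, PG_migrate_skip is one bit per pageblock kept alongside zone->pageblock_flags, and __reset_isolation_suitable() clears it with the pfn walk shown above, while the new reset_isolation_suitable() entry point gates the whole walk on compact_blockskip_flush. Below is a toy userspace model of that walk, for illustration only; the bool array and geometry constants stand in for the real bitmap and zone layout.

/*
 * Toy model of the PG_migrate_skip walk in __reset_isolation_suitable().
 * The kernel keeps one skip bit per pageblock; a bool array and fixed
 * zone geometry stand in for that here.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_NR_PAGES	512UL	/* order-9 pageblocks, typical on x86 */
#define ZONE_START_PFN		0UL
#define ZONE_SPANNED_PAGES	(64UL * PAGEBLOCK_NR_PAGES)
#define NR_PAGEBLOCKS		(ZONE_SPANNED_PAGES / PAGEBLOCK_NR_PAGES)

static bool migrate_skip[NR_PAGEBLOCKS];	/* stand-in for PG_migrate_skip */

/* Isolation failed in this pageblock: avoid it on future scans */
static void set_pageblock_skip(unsigned long pfn)
{
	migrate_skip[(pfn - ZONE_START_PFN) / PAGEBLOCK_NR_PAGES] = true;
}

/* Same shape as the pfn walk in __reset_isolation_suitable() */
static void reset_skip_hints(void)
{
	unsigned long end_pfn = ZONE_START_PFN + ZONE_SPANNED_PAGES;
	unsigned long pfn;

	for (pfn = ZONE_START_PFN; pfn < end_pfn; pfn += PAGEBLOCK_NR_PAGES)
		migrate_skip[(pfn - ZONE_START_PFN) / PAGEBLOCK_NR_PAGES] = false;
}

int main(void)
{
	unsigned long i, skipped;

	/* Pretend isolation recently failed in two pageblocks */
	set_pageblock_skip(3 * PAGEBLOCK_NR_PAGES);
	set_pageblock_skip(40 * PAGEBLOCK_NR_PAGES);

	for (skipped = 0, i = 0; i < NR_PAGEBLOCKS; i++)
		skipped += migrate_skip[i];
	printf("pageblocks skipped before reset: %lu\n", skipped);

	reset_skip_hints();

	for (skipped = 0, i = 0; i < NR_PAGEBLOCKS; i++)
		skipped += migrate_skip[i];
	printf("pageblocks skipped after reset:  %lu\n", skipped);
	return 0;
}

The sweep is linear in the number of pageblocks, which is why this commit moves it from a five-second timer to explicit events: a finished compaction pass, a restart after deferral, or kswapd going to sleep.
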
1 change: 1 addition & 0 deletions trunk/mm/page_alloc.c
@@ -2172,6 +2172,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 					preferred_zone, migratetype);
 	if (page) {
 got_page:
+		preferred_zone->compact_blockskip_flush = false;
 		preferred_zone->compact_considered = 0;
 		preferred_zone->compact_defer_shift = 0;
 		if (order >= preferred_zone->compact_order_failed)
8 changes: 8 additions & 0 deletions trunk/mm/vmscan.c
@@ -2895,6 +2895,14 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
 		 */
		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
 
+		/*
+		 * Compaction records what page blocks it recently failed to
+		 * isolate pages from and skips them in the future scanning.
+		 * When kswapd is going to sleep, it is reasonable to assume
+		 * that pages and compaction may succeed so reset the cache.
+		 */
+		reset_isolation_suitable(pgdat);
+
 		if (!kthread_should_stop())
 			schedule();
 
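Putting the pieces together: compact_finished() arms compact_blockskip_flush when a direct compactor's scanners meet, a successful allocation in __alloc_pages_direct_compact() disarms it, and kswapd_try_to_sleep() performs the deferred flush. Below is a condensed sketch of that hand-off, for illustration only; the three wrapper functions and zone_model are invented names for the call sites above, not kernel API.

/*
 * Condensed model of the compact_blockskip_flush hand-off.  The three
 * functions below stand in for the call sites in this commit:
 * compact_finished(), __alloc_pages_direct_compact() and
 * kswapd_try_to_sleep() respectively.
 */
#include <stdbool.h>
#include <stdio.h>

struct zone_model {
	bool compact_blockskip_flush;
};

/* compact_finished(): scanners met during direct compaction */
static void direct_compaction_completes(struct zone_model *zone)
{
	zone->compact_blockskip_flush = true;
}

/* __alloc_pages_direct_compact(): the allocation finally succeeded */
static void allocation_succeeds(struct zone_model *zone)
{
	zone->compact_blockskip_flush = false;
}

/* kswapd_try_to_sleep() -> reset_isolation_suitable() */
static void kswapd_goes_to_sleep(struct zone_model *zone)
{
	if (zone->compact_blockskip_flush) {
		printf("flushing PG_migrate_skip hints\n");
		zone->compact_blockskip_flush = false;
	} else {
		printf("keeping PG_migrate_skip hints\n");
	}
}

int main(void)
{
	struct zone_model zone = { false };

	/* Full scan completed but the allocation kept failing: hints stale */
	direct_compaction_completes(&zone);
	kswapd_goes_to_sleep(&zone);		/* flushes */

	/* Full scan, then the allocation succeeded: hints still useful */
	direct_compaction_completes(&zone);
	allocation_succeeds(&zone);
	kswapd_goes_to_sleep(&zone);		/* keeps */
	return 0;
}

The design choice this models is the one in the commit's comments: only allocation-driven events decide whether the hints are stale, and kswapd merely executes the flush at a quiet moment.
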
