---
r: 230712
b: refs/heads/master
c: 3e7d344
h: refs/heads/master
v: v3
Mel Gorman authored and Linus Torvalds committed Jan 14, 2011
1 parent 005b693 commit fcd8a5b
Showing 7 changed files with 197 additions and 50 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: ee64fc9354e515a79c7232cfde65c88ec627308b
refs/heads/master: 3e7d344970673c5334cf7b5bb27c8c0942b06126
14 changes: 14 additions & 0 deletions trunk/include/linux/compaction.h
@@ -22,6 +22,9 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *mask);
extern unsigned long compaction_suitable(struct zone *zone, int order);
extern unsigned long compact_zone_order(struct zone *zone, int order,
gfp_t gfp_mask);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6
@@ -59,6 +62,17 @@ static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
return COMPACT_CONTINUE;
}

static inline unsigned long compaction_suitable(struct zone *zone, int order)
{
return COMPACT_SKIPPED;
}

static inline unsigned long compact_zone_order(struct zone *zone, int order,
gfp_t gfp_mask)
{
return 0;
}

static inline void defer_compaction(struct zone *zone)
{
}
7 changes: 7 additions & 0 deletions trunk/include/linux/kernel.h
@@ -600,6 +600,13 @@ struct sysinfo {
#define NUMA_BUILD 0
#endif

/* This helps us avoid #ifdef CONFIG_COMPACTION */
#ifdef CONFIG_COMPACTION
#define COMPACTION_BUILD 1
#else
#define COMPACTION_BUILD 0
#endif

/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
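Like the NUMA_BUILD definition a few lines above it, COMPACTION_BUILD is defined as a 0/1 constant rather than left to bare #ifdef blocks, so callers can test it in ordinary C conditionals: both branches stay visible to the compiler for parsing and type checking, and the disabled branch is discarded as dead code. A minimal stand-alone sketch of the idiom; the helper below is hypothetical and not part of this commit:

#ifdef CONFIG_COMPACTION
#define COMPACTION_BUILD 1
#else
#define COMPACTION_BUILD 0
#endif

/*
 * Hypothetical caller: when CONFIG_COMPACTION is off, COMPACTION_BUILD is 0,
 * the condition folds to false at compile time and the branch is removed,
 * but the code inside it is still checked by the compiler, unlike code
 * hidden behind #ifdef.
 */
static int worth_trying_compaction(int order)
{
	if (COMPACTION_BUILD && order > 0)
		return 1;	/* compaction could be attempted here */
	return 0;
}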
89 changes: 55 additions & 34 deletions trunk/mm/compaction.c
@@ -384,10 +384,62 @@ static int compact_finished(struct zone *zone,
return COMPACT_CONTINUE;
}

/*
* compaction_suitable: Is this suitable to run compaction on this zone now?
* Returns
* COMPACT_SKIPPED - If there are too few free pages for compaction
* COMPACT_PARTIAL - If the allocation would succeed without compaction
* COMPACT_CONTINUE - If compaction should run now
*/
unsigned long compaction_suitable(struct zone *zone, int order)
{
int fragindex;
unsigned long watermark;

/*
* Watermarks for order-0 must be met for compaction. Note the 2UL.
* This is because during migration, copies of pages need to be
* allocated and for a short time, the footprint is higher
*/
watermark = low_wmark_pages(zone) + (2UL << order);
if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
return COMPACT_SKIPPED;

/*
* fragmentation index determines if allocation failures are due to
* low memory or external fragmentation
*
* index of -1 implies allocations might succeed depending on watermarks
* index towards 0 implies failure is due to lack of memory
* index towards 1000 implies failure is due to fragmentation
*
* Only compact if a failure would be due to fragmentation.
*/
fragindex = fragmentation_index(zone, order);
if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
return COMPACT_SKIPPED;

if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0))
return COMPACT_PARTIAL;

return COMPACT_CONTINUE;
}

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
int ret;

ret = compaction_suitable(zone, cc->order);
switch (ret) {
case COMPACT_PARTIAL:
case COMPACT_SKIPPED:
/* Compaction is likely to fail */
return ret;
case COMPACT_CONTINUE:
/* Fall through to compaction */
;
}

/* Setup to move all movable pages to the end of the zone */
cc->migrate_pfn = zone->zone_start_pfn;
cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
@@ -429,7 +481,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
return ret;
}

static unsigned long compact_zone_order(struct zone *zone,
unsigned long compact_zone_order(struct zone *zone,
int order, gfp_t gfp_mask)
{
struct compact_control cc = {
@@ -462,7 +514,6 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
enum zone_type high_zoneidx = gfp_zone(gfp_mask);
int may_enter_fs = gfp_mask & __GFP_FS;
int may_perform_io = gfp_mask & __GFP_IO;
unsigned long watermark;
struct zoneref *z;
struct zone *zone;
int rc = COMPACT_SKIPPED;
@@ -480,43 +531,13 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
/* Compact each zone in the list */
for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
nodemask) {
int fragindex;
int status;

/*
* Watermarks for order-0 must be met for compaction. Note
* the 2UL. This is because during migration, copies of
* pages need to be allocated and for a short time, the
* footprint is higher
*/
watermark = low_wmark_pages(zone) + (2UL << order);
if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
continue;

/*
* fragmentation index determines if allocation failures are
* due to low memory or external fragmentation
*
* index of -1 implies allocations might succeed depending
* on watermarks
* index towards 0 implies failure is due to lack of memory
* index towards 1000 implies failure is due to fragmentation
*
* Only compact if a failure would be due to fragmentation.
*/
fragindex = fragmentation_index(zone, order);
if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
continue;

if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0)) {
rc = COMPACT_PARTIAL;
break;
}

status = compact_zone_order(zone, order, gfp_mask);
rc = max(status, rc);

if (zone_watermark_ok(zone, order, watermark, 0, 0))
/* If a normal allocation would succeed, stop compacting */
if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
break;
}

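A note on the 2UL << order headroom that compaction_suitable() checks above: it is twice the size of the request (1 << order pages), because while pages are being migrated both the originals and their freshly allocated destination copies are briefly resident. A small stand-alone illustration of the numbers; the watermark value is invented for the example:

#include <stdio.h>

int main(void)
{
	unsigned long low_wmark = 1024;	/* example zone low watermark, in pages */

	/*
	 * 2UL << order is twice the 1UL << order pages being requested, so
	 * compaction is only attempted when the zone can briefly hold both
	 * the pages being migrated and their destination copies.
	 */
	for (int order = 0; order <= 9; order++)
		printf("order %2d: request %4lu pages, need %lu free order-0 pages\n",
		       order, 1UL << order, low_wmark + (2UL << order));
	return 0;
}

The fragmentation-index check that follows the watermark test then skips compaction when the shortfall looks like plain lack of memory (index near 0) rather than external fragmentation (index near 1000), since compacting a zone that is simply out of pages cannot help.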
17 changes: 17 additions & 0 deletions trunk/mm/migrate.c
@@ -639,6 +639,23 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
if (!trylock_page(page)) {
if (!force)
goto move_newpage;

/*
* It's not safe for direct compaction to call lock_page.
* For example, during page readahead pages are added locked
* to the LRU. Later, when the IO completes the pages are
* marked uptodate and unlocked. However, the queueing
* could be merging multiple pages for one bio (e.g.
* mpage_readpages). If an allocation happens for the
* second or third page, the process can end up locking
* the same page twice and deadlocking. Rather than
* trying to be clever about what pages can be locked,
* avoid the use of lock_page for direct compaction
* altogether.
*/
if (current->flags & PF_MEMALLOC)
goto move_newpage;

lock_page(page);
}

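The PF_MEMALLOC test added above sits on the path taken when trylock_page() fails but the caller asked to force the lock: a task that is itself doing direct compaction (it sets PF_MEMALLOC around try_to_compact_pages(), as the page_alloc.c hunk below shows) must still back off, since per the comment it may already hold the lock on that very page. A user-space sketch of the same shape, with hypothetical names standing in for the kernel objects:

#include <pthread.h>
#include <stdbool.h>

struct fake_page {				/* stand-in for struct page */
	pthread_mutex_t lock;
};

static __thread bool in_direct_compaction;	/* plays the role of PF_MEMALLOC */

/*
 * Mirrors the control flow in unmap_and_move(): try the lock, give up if
 * the caller is not forcing it, and never block while in a context that
 * may already hold the lock on this page.
 */
static bool lock_page_for_migration(struct fake_page *page, bool force)
{
	if (pthread_mutex_trylock(&page->lock) == 0)
		return true;

	if (!force)
		return false;		/* caller will retry later */

	if (in_direct_compaction)
		return false;		/* sleeping here could self-deadlock */

	pthread_mutex_lock(&page->lock);
	return true;
}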
16 changes: 16 additions & 0 deletions trunk/mm/page_alloc.c
@@ -1815,12 +1815,15 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
int migratetype, unsigned long *did_some_progress)
{
struct page *page;
struct task_struct *tsk = current;

if (!order || compaction_deferred(preferred_zone))
return NULL;

tsk->flags |= PF_MEMALLOC;
*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
nodemask);
tsk->flags &= ~PF_MEMALLOC;
if (*did_some_progress != COMPACT_SKIPPED) {

/* Page migration frees to the PCP lists but we want merging */
@@ -2121,6 +2124,19 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
/* Wait for some write requests to complete then retry */
wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
goto rebalance;
} else {
/*
* High-order allocations do not necessarily loop after
* direct reclaim and reclaim/compaction depends on compaction
* being called after reclaim so call directly if necessary
*/
page = __alloc_pages_direct_compact(gfp_mask, order,
zonelist, high_zoneidx,
nodemask,
alloc_flags, preferred_zone,
migratetype, &did_some_progress);
if (page)
goto got_pg;
}

nopage:
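The new else branch above gives high-order requests one final shot at compaction after a direct-reclaim pass that will not be retried; at this point in the kernel the slowpath already attempts compaction once before reclaim, so the overall ordering becomes compact, reclaim, then compact again. A condensed, hypothetical sketch of that control flow, with stub stand-ins rather than real kernel symbols:

#include <stdbool.h>
#include <stddef.h>

struct page { int dummy; };

/* Stubs for the sketch; the real work happens elsewhere in the kernel. */
static struct page *try_compaction(void)	{ return NULL; }
static struct page *try_direct_reclaim(void)	{ return NULL; }
static bool should_retry_reclaim(void)		{ return false; }

static struct page *slowpath_high_order(void)
{
	struct page *page;

	page = try_compaction();	/* attempted before reclaim */
	if (page)
		return page;

	page = try_direct_reclaim();
	if (page)
		return page;

	if (should_retry_reclaim())
		return NULL;		/* caller waits and loops back */

	/*
	 * New in this commit: a request that will not loop gets one more
	 * compaction attempt, now that reclaim has freed some pages.
	 */
	return try_compaction();
}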