Commit 3dfe7f1

---
r: 28911
b: refs/heads/master
c: d6277db
h: refs/heads/master
i:
  28909: ad3d762
  28907: 433e990
  28903: 5cb9c74
  28895: 2d4ae56
v: v3
Rafael J. Wysocki authored and Linus Torvalds committed Jun 23, 2006
1 parent 26a292c commit 3dfe7f1
Showing 3 changed files with 173 additions and 58 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 7a7c381d25067b9a2bfe025dfcb16459daec0373
+refs/heads/master: d6277db4ab271862ed599da08d78961c70f00002
10 changes: 8 additions & 2 deletions trunk/kernel/power/swsusp.c
@@ -175,6 +175,12 @@ void free_all_swap_pages(int swap, struct bitmap_page *bitmap)
  */
 
 #define SHRINK_BITE	10000
+static inline unsigned long __shrink_memory(long tmp)
+{
+	if (tmp > SHRINK_BITE)
+		tmp = SHRINK_BITE;
+	return shrink_all_memory(tmp);
+}
 
 int swsusp_shrink_memory(void)
 {
@@ -195,12 +201,12 @@ int swsusp_shrink_memory(void)
 		if (!is_highmem(zone))
 			tmp -= zone->free_pages;
 		if (tmp > 0) {
-			tmp = shrink_all_memory(SHRINK_BITE);
+			tmp = __shrink_memory(tmp);
 			if (!tmp)
 				return -ENOMEM;
 			pages += tmp;
 		} else if (size > image_size / PAGE_SIZE) {
-			tmp = shrink_all_memory(SHRINK_BITE);
+			tmp = __shrink_memory(size - (image_size / PAGE_SIZE));
 			pages += tmp;
 		}
 		printk("\b%c", p[i++%4]);
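The swsusp.c change sizes each reclaim request by the remaining page deficit instead of always requesting a full SHRINK_BITE. A minimal user-space sketch of the capping behaviour (shrink_all_memory() is stubbed out here and simply pretends every requested page was freed; it is not the kernel function):

#include <stdio.h>

#define SHRINK_BITE 10000

/* Stub: pretend every requested page gets reclaimed. */
static unsigned long shrink_all_memory(long nr_pages)
{
	return nr_pages;
}

/* Same shape as the helper added above: a single reclaim request
 * is capped at SHRINK_BITE pages. */
static unsigned long __shrink_memory(long tmp)
{
	if (tmp > SHRINK_BITE)
		tmp = SHRINK_BITE;
	return shrink_all_memory(tmp);
}

int main(void)
{
	printf("deficit 250    -> request %lu\n", __shrink_memory(250));
	printf("deficit 123456 -> request %lu\n", __shrink_memory(123456));
	return 0;
}

A deficit of 250 pages now yields a request for exactly 250, while a huge deficit is still worked off in 10000-page bites by the surrounding do/while loop in swsusp_shrink_memory().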
219 changes: 164 additions & 55 deletions trunk/mm/vmscan.c
@@ -61,6 +61,8 @@ struct scan_control {
	 * In this context, it doesn't matter that we scan the
	 * whole list at once. */
	int swap_cluster_max;
+
+	int swappiness;
 };
 
 /*
@@ -741,7 +743,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
	 * A 100% value of vm_swappiness overrides this algorithm
	 * altogether.
	 */
-	swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
+	swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;
 
	/*
	 * Now use this metric to decide whether to start moving mapped
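Carrying swappiness in struct scan_control lets a caller force mapped-page reclaim for a single reclaim pass without touching the global vm_swappiness. A small worked example of the tendency arithmetic (100 is the threshold at which vmscan sets reclaim_mapped; the mapped_ratio and distress values are made up for illustration):

#include <stdio.h>

/* swap_tendency as computed in shrink_active_list(); mapped pages
 * start being reclaimed once it reaches 100. */
static int swap_tendency(int mapped_ratio, int distress, int swappiness)
{
	return mapped_ratio / 2 + distress + swappiness;
}

int main(void)
{
	/* Default swappiness (60) stays below the threshold... */
	printf("%d\n", swap_tendency(50, 0, 60));	/* 85 */
	/* ...while the suspend path's forced 100 crosses it. */
	printf("%d\n", swap_tendency(50, 0, 100));	/* 125 */
	return 0;
}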
@@ -957,6 +959,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
		.may_writepage = !laptop_mode,
		.swap_cluster_max = SWAP_CLUSTER_MAX,
		.may_swap = 1,
+		.swappiness = vm_swappiness,
	};
 
	inc_page_state(allocstall);
@@ -1021,10 +1024,6 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
  * For kswapd, balance_pgdat() will work across all this node's zones until
  * they are all at pages_high.
  *
- * If `nr_pages' is non-zero then it is the number of pages which are to be
- * reclaimed, regardless of the zone occupancies. This is a software suspend
- * special.
- *
  * Returns the number of pages which were actually freed.
  *
  * There is special handling here for zones which are full of pinned pages.
@@ -1042,10 +1041,8 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
  * the page allocator fallback scheme to ensure that aging of pages is balanced
  * across the zones.
  */
-static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
-				int order)
+static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 {
-	unsigned long to_free = nr_pages;
	int all_zones_ok;
	int priority;
	int i;
@@ -1055,7 +1052,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.may_swap = 1,
-		.swap_cluster_max = nr_pages ? nr_pages : SWAP_CLUSTER_MAX,
+		.swap_cluster_max = SWAP_CLUSTER_MAX,
+		.swappiness = vm_swappiness,
	};
 
 loop_again:
@@ -1082,31 +1080,26 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
 
		all_zones_ok = 1;
 
-		if (nr_pages == 0) {
-			/*
-			 * Scan in the highmem->dma direction for the highest
-			 * zone which needs scanning
-			 */
-			for (i = pgdat->nr_zones - 1; i >= 0; i--) {
-				struct zone *zone = pgdat->node_zones + i;
+		/*
+		 * Scan in the highmem->dma direction for the highest
+		 * zone which needs scanning
+		 */
+		for (i = pgdat->nr_zones - 1; i >= 0; i--) {
+			struct zone *zone = pgdat->node_zones + i;
 
-				if (!populated_zone(zone))
-					continue;
+			if (!populated_zone(zone))
+				continue;
 
-				if (zone->all_unreclaimable &&
-				    priority != DEF_PRIORITY)
-					continue;
+			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+				continue;
 
-				if (!zone_watermark_ok(zone, order,
-						zone->pages_high, 0, 0)) {
-					end_zone = i;
-					goto scan;
-				}
+			if (!zone_watermark_ok(zone, order, zone->pages_high,
+					       0, 0)) {
+				end_zone = i;
+				goto scan;
			}
-			goto out;
-		} else {
-			end_zone = pgdat->nr_zones - 1;
		}
+		goto out;
 scan:
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;
@@ -1133,11 +1126,9 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
				continue;
 
-			if (nr_pages == 0) {	/* Not software suspend */
-				if (!zone_watermark_ok(zone, order,
-						zone->pages_high, end_zone, 0))
-					all_zones_ok = 0;
-			}
+			if (!zone_watermark_ok(zone, order, zone->pages_high,
+					       end_zone, 0))
+				all_zones_ok = 0;
			zone->temp_priority = priority;
			if (zone->prev_priority > priority)
				zone->prev_priority = priority;
@@ -1162,8 +1153,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
			    total_scanned > nr_reclaimed + nr_reclaimed / 2)
				sc.may_writepage = 1;
		}
-		if (nr_pages && to_free > nr_reclaimed)
-			continue;	/* swsusp: need to do more work */
		if (all_zones_ok)
			break;		/* kswapd: all done */
		/*
@@ -1179,7 +1168,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
		 * matches the direct reclaim path behaviour in terms of impact
		 * on zone->*_priority.
		 */
-		if ((nr_reclaimed >= SWAP_CLUSTER_MAX) && !nr_pages)
+		if (nr_reclaimed >= SWAP_CLUSTER_MAX)
			break;
	}
 out:
@@ -1261,7 +1250,7 @@ static int kswapd(void *p)
		}
		finish_wait(&pgdat->kswapd_wait, &wait);
 
-		balance_pgdat(pgdat, 0, order);
+		balance_pgdat(pgdat, order);
	}
	return 0;
 }
@@ -1290,35 +1279,154 @@ void wakeup_kswapd(struct zone *zone, int order)
 
 #ifdef CONFIG_PM
 /*
- * Try to free `nr_pages' of memory, system-wide. Returns the number of freed
- * pages.
+ * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages
+ * from LRU lists system-wide, for given pass and priority, and returns the
+ * number of reclaimed pages
+ *
+ * For pass > 3 we also try to shrink the LRU lists that contain a few pages
+ */
+static unsigned long shrink_all_zones(unsigned long nr_pages, int pass,
+				      int prio, struct scan_control *sc)
+{
+	struct zone *zone;
+	unsigned long nr_to_scan, ret = 0;
+
+	for_each_zone(zone) {
+
+		if (!populated_zone(zone))
+			continue;
+
+		if (zone->all_unreclaimable && prio != DEF_PRIORITY)
+			continue;
+
+		/* For pass = 0 we don't shrink the active list */
+		if (pass > 0) {
+			zone->nr_scan_active += (zone->nr_active >> prio) + 1;
+			if (zone->nr_scan_active >= nr_pages || pass > 3) {
+				zone->nr_scan_active = 0;
+				nr_to_scan = min(nr_pages, zone->nr_active);
+				shrink_active_list(nr_to_scan, zone, sc);
+			}
+		}
+
+		zone->nr_scan_inactive += (zone->nr_inactive >> prio) + 1;
+		if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
+			zone->nr_scan_inactive = 0;
+			nr_to_scan = min(nr_pages, zone->nr_inactive);
+			ret += shrink_inactive_list(nr_to_scan, zone, sc);
+			if (ret >= nr_pages)
+				return ret;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Try to free `nr_pages' of memory, system-wide, and return the number of
+ * freed pages.
+ *
+ * Rather than trying to age LRUs the aim is to preserve the overall
+ * LRU order by reclaiming preferentially
+ * inactive > active > active referenced > active mapped
  */
 unsigned long shrink_all_memory(unsigned long nr_pages)
 {
-	pg_data_t *pgdat;
-	unsigned long nr_to_free = nr_pages;
+	unsigned long lru_pages, nr_slab;
	unsigned long ret = 0;
-	unsigned retry = 2;
-	struct reclaim_state reclaim_state = {
-		.reclaimed_slab = 0,
+	int pass;
+	struct reclaim_state reclaim_state;
+	struct zone *zone;
+	struct scan_control sc = {
+		.gfp_mask = GFP_KERNEL,
+		.may_swap = 0,
+		.swap_cluster_max = nr_pages,
+		.may_writepage = 1,
+		.swappiness = vm_swappiness,
	};
 
	current->reclaim_state = &reclaim_state;
-repeat:
-	for_each_online_pgdat(pgdat) {
-		unsigned long freed;
 
-		freed = balance_pgdat(pgdat, nr_to_free, 0);
-		ret += freed;
-		nr_to_free -= freed;
-		if ((long)nr_to_free <= 0)
+	lru_pages = 0;
+	for_each_zone(zone)
+		lru_pages += zone->nr_active + zone->nr_inactive;
+
+	nr_slab = read_page_state(nr_slab);
+	/* If slab caches are huge, it's better to hit them first */
+	while (nr_slab >= lru_pages) {
+		reclaim_state.reclaimed_slab = 0;
+		shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
+		if (!reclaim_state.reclaimed_slab)
			break;
-	}
-	if (retry-- && ret < nr_pages) {
-		blk_congestion_wait(WRITE, HZ/5);
-		goto repeat;
+
+		ret += reclaim_state.reclaimed_slab;
+		if (ret >= nr_pages)
+			goto out;
+
+		nr_slab -= reclaim_state.reclaimed_slab;
	}
+
+	/*
+	 * We try to shrink LRUs in 5 passes:
+	 * 0 = Reclaim from inactive_list only
+	 * 1 = Reclaim from active list but don't reclaim mapped
+	 * 2 = 2nd pass of type 1
+	 * 3 = Reclaim mapped (normal reclaim)
+	 * 4 = 2nd pass of type 3
+	 */
+	for (pass = 0; pass < 5; pass++) {
+		int prio;
+
+		/* Needed for shrinking slab caches later on */
+		if (!lru_pages)
+			for_each_zone(zone) {
+				lru_pages += zone->nr_active;
+				lru_pages += zone->nr_inactive;
+			}
+
+		/* Force reclaiming mapped pages in the passes #3 and #4 */
+		if (pass > 2) {
+			sc.may_swap = 1;
+			sc.swappiness = 100;
+		}
+
+		for (prio = DEF_PRIORITY; prio >= 0; prio--) {
+			unsigned long nr_to_scan = nr_pages - ret;
+
+			sc.nr_mapped = read_page_state(nr_mapped);
+			sc.nr_scanned = 0;
+
+			ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
+			if (ret >= nr_pages)
+				goto out;
+
+			reclaim_state.reclaimed_slab = 0;
+			shrink_slab(sc.nr_scanned, sc.gfp_mask, lru_pages);
+			ret += reclaim_state.reclaimed_slab;
+			if (ret >= nr_pages)
+				goto out;
+
+			if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
+				blk_congestion_wait(WRITE, HZ / 10);
+		}
+
+		lru_pages = 0;
+	}
+
+	/*
+	 * If ret = 0, we could not shrink LRUs, but there may be something
+	 * in slab caches
+	 */
+	if (!ret)
+		do {
+			reclaim_state.reclaimed_slab = 0;
+			shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
+			ret += reclaim_state.reclaimed_slab;
+		} while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
+
+out:
	current->reclaim_state = NULL;
+
	return ret;
 }
 #endif
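shrink_all_zones() reuses the zones' incremental scan counters: each call adds nr_active >> prio (plus one) to a backlog, and the list is only scanned once the backlog reaches the request size, or unconditionally on passes > 3. A rough user-space illustration of how the backlog builds as priority drops (the list and request sizes are hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned long nr_active = 1UL << 20;	/* 1M pages on the active list */
	unsigned long nr_pages = 1024;		/* size of the reclaim request */
	unsigned long backlog = 0;
	int prio, calls = 0;

	/* The increment doubles as prio drops, so light pressure
	 * (prio near DEF_PRIORITY = 12) takes a few calls before
	 * the list is touched at all. */
	for (prio = 12; prio >= 0 && backlog < nr_pages; prio--, calls++)
		backlog += (nr_active >> prio) + 1;

	printf("list scanned after %d calls, backlog %lu\n", calls, backlog);
	return 0;
}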
Expand Down Expand Up @@ -1416,6 +1524,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
.swap_cluster_max = max_t(unsigned long, nr_pages,
SWAP_CLUSTER_MAX),
.gfp_mask = gfp_mask,
.swappiness = vm_swappiness,
};

disable_swap_token();
Expand Down
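Taken together, the new shrink_all_memory() walks pass x priority combinations so that pages are reclaimed preferentially in the order inactive > active > active referenced > active mapped. A user-space sketch of that control flow (reclaim_pages() is a hypothetical stand-in for shrink_all_zones() plus the slab shrinking; its yield model is invented):

#include <stdio.h>

#define DEF_PRIORITY 12

/* Hypothetical stand-in for shrink_all_zones() + shrink_slab():
 * pretend later passes and lower prio values free more pages. */
static unsigned long reclaim_pages(unsigned long want, int pass, int prio)
{
	unsigned long freed = (unsigned long)(pass + 1) * (DEF_PRIORITY - prio + 1);
	return freed < want ? freed : want;
}

/*
 * Same pass structure as the new shrink_all_memory():
 *   pass 0   - inactive list only
 *   pass 1-2 - active list too, but no mapped pages
 *   pass 3-4 - mapped pages as well (sc.may_swap = 1, sc.swappiness = 100)
 * Pressure rises within a pass as prio counts down, and the loops
 * bail out as soon as the target is met.
 */
static unsigned long shrink(unsigned long nr_pages)
{
	unsigned long ret = 0;
	int pass, prio;

	for (pass = 0; pass < 5; pass++)
		for (prio = DEF_PRIORITY; prio >= 0; prio--) {
			ret += reclaim_pages(nr_pages - ret, pass, prio);
			if (ret >= nr_pages)
				return ret;	/* target reached early */
		}
	return ret;
}

int main(void)
{
	printf("asked for 500 pages, reclaimed %lu\n", shrink(500));
	return 0;
}

The real function additionally hits the slab caches first when they dwarf the LRUs, and falls back to slab-only reclaim if the LRU passes free nothing.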
